      1 /*	$NetBSD: if_wm.c,v 1.653 2019/12/11 09:27:46 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.653 2019/12/11 09:27:46 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
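
/*
 * Illustrative usage (a sketch, not quoted from below): the extra
 * parentheses around the arguments are required because DPRINTF pastes
 * them after printf verbatim:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The message is printed only when WM_DEBUG_LINK is set in wm_debug.
 */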
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * The maximum number of interrupts this driver uses.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
     202  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
     203  * segments, m_defrag() is called to reduce the segment count.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
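
#if 0	/* Illustrative sketch only; never compiled. */
/*
 * The m_defrag() fallback described in the comment above, reduced to its
 * core: when bus_dmamap_load_mbuf() fails with EFBIG (the chain needs
 * more than WM_NTXSEGS segments), compact the chain and retry once.
 * The real transmit path (wm_send_common_locked() and friends) also
 * bumps the toomanyseg/defrag event counters and handles other errors.
 */
static int
wm_load_txbuf_sketch(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **mp)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, *mp,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many DMA segments; compact the chain and retry. */
		m = m_defrag(*mp, M_DONTWAIT);
		if (m == NULL)
			return ENOBUFS;	/* caller frees and drops */
		*mp = m;
		error = bus_dmamap_load_mbuf(dmat, map, *mp,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	return error;
}
#endif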
    230 
    231 /*
     232  * Receive descriptor list size.  We have one Rx buffer for a normal-
     233  * sized packet.  A full-sized jumbo packet consumes 5 Rx buffers.
     234  * We allocate 256 receive descriptors, each with a 2k buffer
     235  * (MCLBYTES), which gives us room for about 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256U
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
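
/*
 * Note: WM_NRXDESC must stay a power of two for the mask-based wrap
 * above to work; e.g. WM_NEXTRX(255) == (255 + 1) & 0xff == 0.
 */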
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
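
/*
 * Illustrative expansion (derived from the macros above):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The string inside sizeof() is a literal (## is not expanded in string
 * literals); it merely donates a buffer size.  WM_Q_EVCNT_ATTACH() then
 * formats the runtime name as, e.g., "txq00txdw" via "%s%02d%s".
 */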
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
     335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     361 	 * CPUs.  This queue mediates between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
     366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
     398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
     417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
     429 	int rxq_ptr;			/* next ready Rx desc/queue entry */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 	krndsource_t rnd_source;	/* random source */
    462 };
    463 
    464 struct wm_phyop {
    465 	int (*acquire)(struct wm_softc *);
    466 	void (*release)(struct wm_softc *);
    467 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    468 	int (*writereg_locked)(device_t, int, int, uint16_t);
    469 	int reset_delay_us;
    470 };
    471 
    472 struct wm_nvmop {
    473 	int (*acquire)(struct wm_softc *);
    474 	void (*release)(struct wm_softc *);
    475 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    476 };
    477 
    478 /*
    479  * Software state per device.
    480  */
    481 struct wm_softc {
    482 	device_t sc_dev;		/* generic device information */
    483 	bus_space_tag_t sc_st;		/* bus space tag */
    484 	bus_space_handle_t sc_sh;	/* bus space handle */
    485 	bus_size_t sc_ss;		/* bus space size */
    486 	bus_space_tag_t sc_iot;		/* I/O space tag */
    487 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    488 	bus_size_t sc_ios;		/* I/O space size */
    489 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    490 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    491 	bus_size_t sc_flashs;		/* flash registers space size */
    492 	off_t sc_flashreg_offset;	/*
    493 					 * offset to flash registers from
    494 					 * start of BAR
    495 					 */
    496 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    497 
    498 	struct ethercom sc_ethercom;	/* ethernet common data */
    499 	struct mii_data sc_mii;		/* MII/media information */
    500 
    501 	pci_chipset_tag_t sc_pc;
    502 	pcitag_t sc_pcitag;
    503 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    504 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    505 
    506 	uint16_t sc_pcidevid;		/* PCI device ID */
    507 	wm_chip_type sc_type;		/* MAC type */
    508 	int sc_rev;			/* MAC revision */
    509 	wm_phy_type sc_phytype;		/* PHY type */
     510 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    511 #define	WM_MEDIATYPE_UNKNOWN		0x00
    512 #define	WM_MEDIATYPE_FIBER		0x01
    513 #define	WM_MEDIATYPE_COPPER		0x02
    514 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    515 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    516 	int sc_flags;			/* flags; see below */
    517 	u_short sc_if_flags;		/* last if_flags */
    518 	int sc_ec_capenable;		/* last ec_capenable */
    519 	int sc_flowflags;		/* 802.3x flow control flags */
    520 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    521 	int sc_align_tweak;
    522 
    523 	void *sc_ihs[WM_MAX_NINTR];	/*
    524 					 * interrupt cookie.
    525 					 * - legacy and msi use sc_ihs[0] only
     526 					 * - msix uses sc_ihs[0] to sc_ihs[nintrs-1]
    527 					 */
    528 	pci_intr_handle_t *sc_intrs;	/*
    529 					 * legacy and msi use sc_intrs[0] only
     530 					 * msix uses sc_intrs[0] to sc_intrs[nintrs-1]
    531 					 */
    532 	int sc_nintrs;			/* number of interrupts */
    533 
    534 	int sc_link_intr_idx;		/* index of MSI-X tables */
    535 
    536 	callout_t sc_tick_ch;		/* tick callout */
    537 	bool sc_core_stopping;
    538 
    539 	int sc_nvm_ver_major;
    540 	int sc_nvm_ver_minor;
    541 	int sc_nvm_ver_build;
    542 	int sc_nvm_addrbits;		/* NVM address bits */
    543 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    544 	int sc_ich8_flash_base;
    545 	int sc_ich8_flash_bank_size;
    546 	int sc_nvm_k1_enabled;
    547 
    548 	int sc_nqueues;
    549 	struct wm_queue *sc_queue;
    550 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    551 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    552 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    553 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    554 
    555 	int sc_affinity_offset;
    556 
    557 #ifdef WM_EVENT_COUNTERS
    558 	/* Event counters. */
    559 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    560 
    561 	/* WM_T_82542_2_1 only */
    562 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    563 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    564 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    565 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    566 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    567 #endif /* WM_EVENT_COUNTERS */
    568 
     569 	/* This variable is used only on the 82547. */
    570 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    571 
    572 	uint32_t sc_ctrl;		/* prototype CTRL register */
    573 #if 0
    574 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    575 #endif
    576 	uint32_t sc_icr;		/* prototype interrupt bits */
    577 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    578 	uint32_t sc_tctl;		/* prototype TCTL register */
    579 	uint32_t sc_rctl;		/* prototype RCTL register */
    580 	uint32_t sc_txcw;		/* prototype TXCW register */
    581 	uint32_t sc_tipg;		/* prototype TIPG register */
    582 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    583 	uint32_t sc_pba;		/* prototype PBA register */
    584 
    585 	int sc_tbi_linkup;		/* TBI link status */
    586 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    587 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    588 
    589 	int sc_mchash_type;		/* multicast filter offset */
    590 
    591 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    592 
    593 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    594 	kmutex_t *sc_ich_phymtx;	/*
    595 					 * 82574/82583/ICH/PCH specific PHY
    596 					 * mutex. For 82574/82583, the mutex
    597 					 * is used for both PHY and NVM.
    598 					 */
    599 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    600 
    601 	struct wm_phyop phy;
    602 	struct wm_nvmop nvm;
    603 };
    604 
    605 #define WM_CORE_LOCK(_sc)						\
    606 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    607 #define WM_CORE_UNLOCK(_sc)						\
    608 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    609 #define WM_CORE_LOCKED(_sc)						\
    610 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    611 
    612 #define	WM_RXCHAIN_RESET(rxq)						\
    613 do {									\
    614 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    615 	*(rxq)->rxq_tailp = NULL;					\
    616 	(rxq)->rxq_len = 0;						\
    617 } while (/*CONSTCOND*/0)
    618 
    619 #define	WM_RXCHAIN_LINK(rxq, m)						\
    620 do {									\
    621 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    622 	(rxq)->rxq_tailp = &(m)->m_next;				\
    623 } while (/*CONSTCOND*/0)
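
/*
 * Note: rxq_tailp always points at the m_next slot where the next
 * fragment will be linked, so WM_RXCHAIN_LINK() appends in O(1) without
 * walking the chain; WM_RXCHAIN_RESET() re-aims it at rxq_head for an
 * empty chain.
 */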
    624 
    625 #ifdef WM_EVENT_COUNTERS
    626 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    627 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    628 
    629 #define WM_Q_EVCNT_INCR(qname, evname)			\
    630 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    631 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    632 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    633 #else /* !WM_EVENT_COUNTERS */
    634 #define	WM_EVCNT_INCR(ev)	/* nothing */
    635 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    636 
    637 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    638 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    639 #endif /* !WM_EVENT_COUNTERS */
    640 
    641 #define	CSR_READ(sc, reg)						\
    642 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    643 #define	CSR_WRITE(sc, reg, val)						\
    644 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    645 #define	CSR_WRITE_FLUSH(sc)						\
    646 	(void)CSR_READ((sc), WMREG_STATUS)
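
/*
 * Note: CSR_WRITE_FLUSH() reads STATUS and discards the value; the read
 * cannot complete until all posted writes ahead of it have reached the
 * device, which makes it a cheap write-posting flush.
 */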
    647 
    648 #define ICH8_FLASH_READ32(sc, reg)					\
    649 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    650 	    (reg) + sc->sc_flashreg_offset)
    651 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    652 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset, (data))
    654 
    655 #define ICH8_FLASH_READ16(sc, reg)					\
    656 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    657 	    (reg) + sc->sc_flashreg_offset)
    658 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    659 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    660 	    (reg) + sc->sc_flashreg_offset, (data))
    661 
    662 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    663 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    664 
    665 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    666 #define	WM_CDTXADDR_HI(txq, x)						\
    667 	(sizeof(bus_addr_t) == 8 ?					\
    668 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    669 
    670 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    671 #define	WM_CDRXADDR_HI(rxq, x)						\
    672 	(sizeof(bus_addr_t) == 8 ?					\
    673 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
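
/*
 * Illustrative use (a sketch; the actual writes live in
 * wm_init_tx_regs()/wm_init_rx_regs()): the LO/HI halves above feed the
 * 64-bit descriptor base address register pairs, e.g.
 *
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 *
 * With a 32-bit bus_addr_t the HI half is constant 0.
 */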
    674 
    675 /*
     676  * Register read/write functions,
     677  * other than CSR_{READ|WRITE}().
    678  */
    679 #if 0
    680 static inline uint32_t wm_io_read(struct wm_softc *, int);
    681 #endif
    682 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    683 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    684     uint32_t, uint32_t);
    685 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    686 
    687 /*
    688  * Descriptor sync/init functions.
    689  */
    690 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    691 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    692 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    693 
    694 /*
    695  * Device driver interface functions and commonly used functions.
    696  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    697  */
    698 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    699 static int	wm_match(device_t, cfdata_t, void *);
    700 static void	wm_attach(device_t, device_t, void *);
    701 static int	wm_detach(device_t, int);
    702 static bool	wm_suspend(device_t, const pmf_qual_t *);
    703 static bool	wm_resume(device_t, const pmf_qual_t *);
    704 static void	wm_watchdog(struct ifnet *);
    705 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    706     uint16_t *);
    707 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    708     uint16_t *);
    709 static void	wm_tick(void *);
    710 static int	wm_ifflags_cb(struct ethercom *);
    711 static int	wm_ioctl(struct ifnet *, u_long, void *);
    712 /* MAC address related */
    713 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    714 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    715 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    716 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    717 static int	wm_rar_count(struct wm_softc *);
    718 static void	wm_set_filter(struct wm_softc *);
    719 /* Reset and init related */
    720 static void	wm_set_vlan(struct wm_softc *);
    721 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    722 static void	wm_get_auto_rd_done(struct wm_softc *);
    723 static void	wm_lan_init_done(struct wm_softc *);
    724 static void	wm_get_cfg_done(struct wm_softc *);
    725 static int	wm_phy_post_reset(struct wm_softc *);
    726 static int	wm_write_smbus_addr(struct wm_softc *);
    727 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    728 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    729 static void	wm_initialize_hardware_bits(struct wm_softc *);
    730 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    731 static int	wm_reset_phy(struct wm_softc *);
    732 static void	wm_flush_desc_rings(struct wm_softc *);
    733 static void	wm_reset(struct wm_softc *);
    734 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    735 static void	wm_rxdrain(struct wm_rxqueue *);
    736 static void	wm_init_rss(struct wm_softc *);
    737 static void	wm_adjust_qnum(struct wm_softc *, int);
    738 static inline bool	wm_is_using_msix(struct wm_softc *);
    739 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    740 static int	wm_softint_establish(struct wm_softc *, int, int);
    741 static int	wm_setup_legacy(struct wm_softc *);
    742 static int	wm_setup_msix(struct wm_softc *);
    743 static int	wm_init(struct ifnet *);
    744 static int	wm_init_locked(struct ifnet *);
    745 static void	wm_unset_stopping_flags(struct wm_softc *);
    746 static void	wm_set_stopping_flags(struct wm_softc *);
    747 static void	wm_stop(struct ifnet *, int);
    748 static void	wm_stop_locked(struct ifnet *, int);
    749 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    750 static void	wm_82547_txfifo_stall(void *);
    751 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    752 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    753 /* DMA related */
    754 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    755 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    756 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    757 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    758     struct wm_txqueue *);
    759 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    760 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    761 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    762     struct wm_rxqueue *);
    763 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    764 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    765 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    766 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    767 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    768 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    769 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    770     struct wm_txqueue *);
    771 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    772     struct wm_rxqueue *);
    773 static int	wm_alloc_txrx_queues(struct wm_softc *);
    774 static void	wm_free_txrx_queues(struct wm_softc *);
    775 static int	wm_init_txrx_queues(struct wm_softc *);
    776 /* Start */
    777 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    778     struct wm_txsoft *, uint32_t *, uint8_t *);
    779 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    780 static void	wm_start(struct ifnet *);
    781 static void	wm_start_locked(struct ifnet *);
    782 static int	wm_transmit(struct ifnet *, struct mbuf *);
    783 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    784 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    785     bool);
    786 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    787     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    788 static void	wm_nq_start(struct ifnet *);
    789 static void	wm_nq_start_locked(struct ifnet *);
    790 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    791 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    792 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    793     bool);
    794 static void	wm_deferred_start_locked(struct wm_txqueue *);
    795 static void	wm_handle_queue(void *);
    796 /* Interrupt */
    797 static bool	wm_txeof(struct wm_txqueue *, u_int);
    798 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    799 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    800 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    801 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    802 static void	wm_linkintr(struct wm_softc *, uint32_t);
    803 static int	wm_intr_legacy(void *);
    804 static inline void	wm_txrxintr_disable(struct wm_queue *);
    805 static inline void	wm_txrxintr_enable(struct wm_queue *);
    806 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    807 static int	wm_txrxintr_msix(void *);
    808 static int	wm_linkintr_msix(void *);
    809 
    810 /*
    811  * Media related.
    812  * GMII, SGMII, TBI, SERDES and SFP.
    813  */
    814 /* Common */
    815 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    816 /* GMII related */
    817 static void	wm_gmii_reset(struct wm_softc *);
    818 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    819 static int	wm_get_phy_id_82575(struct wm_softc *);
    820 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    821 static int	wm_gmii_mediachange(struct ifnet *);
    822 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    823 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    824 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    825 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    826 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    827 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    828 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    829 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    830 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    831 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    832 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    833 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    834 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    835 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    836 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    837 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    838 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    839 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    840 	bool);
    841 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    842 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    843 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    844 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    845 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    846 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    847 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    848 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    849 static void	wm_gmii_statchg(struct ifnet *);
    850 /*
     851  * Kumeran related (80003, ICH* and PCH*).
     852  * These functions are not for accessing MII registers but for accessing
     853  * Kumeran-specific registers.
    854  */
    855 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    856 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    857 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    858 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    859 /* EMI register related */
    860 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    861 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    862 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    863 /* SGMII */
    864 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    865 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    866 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    867 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    868 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    869 /* TBI related */
    870 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    871 static void	wm_tbi_mediainit(struct wm_softc *);
    872 static int	wm_tbi_mediachange(struct ifnet *);
    873 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    874 static int	wm_check_for_link(struct wm_softc *);
    875 static void	wm_tbi_tick(struct wm_softc *);
    876 /* SERDES related */
    877 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    878 static int	wm_serdes_mediachange(struct ifnet *);
    879 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    880 static void	wm_serdes_tick(struct wm_softc *);
    881 /* SFP related */
    882 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    883 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    884 
    885 /*
    886  * NVM related.
     887  * Microwire, SPI (with or without EERD) and Flash.
    888  */
    889 /* Misc functions */
    890 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    891 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    892 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    893 /* Microwire */
    894 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    895 /* SPI */
    896 static int	wm_nvm_ready_spi(struct wm_softc *);
    897 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    898 /* Using with EERD */
    899 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    900 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    901 /* Flash */
    902 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    903     unsigned int *);
    904 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    905 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    906 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    907     uint32_t *);
    908 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    909 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    910 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    911 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    912 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    913 /* iNVM */
    914 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    915 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    916 /* Lock, detecting NVM type, validate checksum and read */
    917 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    918 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    919 static int	wm_nvm_validate_checksum(struct wm_softc *);
    920 static void	wm_nvm_version_invm(struct wm_softc *);
    921 static void	wm_nvm_version(struct wm_softc *);
    922 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    923 
    924 /*
    925  * Hardware semaphores.
     926  * Very complex...
    927  */
    928 static int	wm_get_null(struct wm_softc *);
    929 static void	wm_put_null(struct wm_softc *);
    930 static int	wm_get_eecd(struct wm_softc *);
    931 static void	wm_put_eecd(struct wm_softc *);
    932 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    933 static void	wm_put_swsm_semaphore(struct wm_softc *);
    934 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    935 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    936 static int	wm_get_nvm_80003(struct wm_softc *);
    937 static void	wm_put_nvm_80003(struct wm_softc *);
    938 static int	wm_get_nvm_82571(struct wm_softc *);
    939 static void	wm_put_nvm_82571(struct wm_softc *);
    940 static int	wm_get_phy_82575(struct wm_softc *);
    941 static void	wm_put_phy_82575(struct wm_softc *);
    942 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    943 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    944 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    945 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    946 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    947 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    948 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    949 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    950 
    951 /*
    952  * Management mode and power management related subroutines.
    953  * BMC, AMT, suspend/resume and EEE.
    954  */
    955 #if 0
    956 static int	wm_check_mng_mode(struct wm_softc *);
    957 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    958 static int	wm_check_mng_mode_82574(struct wm_softc *);
    959 static int	wm_check_mng_mode_generic(struct wm_softc *);
    960 #endif
    961 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    962 static bool	wm_phy_resetisblocked(struct wm_softc *);
    963 static void	wm_get_hw_control(struct wm_softc *);
    964 static void	wm_release_hw_control(struct wm_softc *);
    965 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    966 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    967 static void	wm_init_manageability(struct wm_softc *);
    968 static void	wm_release_manageability(struct wm_softc *);
    969 static void	wm_get_wakeup(struct wm_softc *);
    970 static int	wm_ulp_disable(struct wm_softc *);
    971 static int	wm_enable_phy_wakeup(struct wm_softc *);
    972 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    973 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    974 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    975 static void	wm_enable_wakeup(struct wm_softc *);
    976 static void	wm_disable_aspm(struct wm_softc *);
    977 /* LPLU (Low Power Link Up) */
    978 static void	wm_lplu_d0_disable(struct wm_softc *);
    979 /* EEE */
    980 static int	wm_set_eee_i350(struct wm_softc *);
    981 static int	wm_set_eee_pchlan(struct wm_softc *);
    982 static int	wm_set_eee(struct wm_softc *);
    983 
    984 /*
    985  * Workarounds (mainly PHY related).
    986  * Basically, PHY's workarounds are in the PHY drivers.
     987  * Basically, workarounds for the PHYs themselves are in the PHY drivers.
    988 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    989 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    990 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    991 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    992 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    993 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    994 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    995 static int	wm_k1_workaround_lv(struct wm_softc *);
    996 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    997 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    998 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    999 static void	wm_reset_init_script_82575(struct wm_softc *);
   1000 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1001 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1002 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1003 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1004 static int	wm_pll_workaround_i210(struct wm_softc *);
   1005 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1006 
   1007 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1008     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1009 
   1010 /*
   1011  * Devices supported by this driver.
   1012  */
   1013 static const struct wm_product {
   1014 	pci_vendor_id_t		wmp_vendor;
   1015 	pci_product_id_t	wmp_product;
   1016 	const char		*wmp_name;
   1017 	wm_chip_type		wmp_type;
   1018 	uint32_t		wmp_flags;
   1019 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1020 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1021 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1022 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1023 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1024 } wm_products[] = {
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1026 	  "Intel i82542 1000BASE-X Ethernet",
   1027 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1030 	  "Intel i82543GC 1000BASE-X Ethernet",
   1031 	  WM_T_82543,		WMP_F_FIBER },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1034 	  "Intel i82543GC 1000BASE-T Ethernet",
   1035 	  WM_T_82543,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1038 	  "Intel i82544EI 1000BASE-T Ethernet",
   1039 	  WM_T_82544,		WMP_F_COPPER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1042 	  "Intel i82544EI 1000BASE-X Ethernet",
   1043 	  WM_T_82544,		WMP_F_FIBER },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1046 	  "Intel i82544GC 1000BASE-T Ethernet",
   1047 	  WM_T_82544,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1050 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1051 	  WM_T_82544,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1054 	  "Intel i82540EM 1000BASE-T Ethernet",
   1055 	  WM_T_82540,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1058 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1059 	  WM_T_82540,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1062 	  "Intel i82540EP 1000BASE-T Ethernet",
   1063 	  WM_T_82540,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1066 	  "Intel i82540EP 1000BASE-T Ethernet",
   1067 	  WM_T_82540,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1070 	  "Intel i82540EP 1000BASE-T Ethernet",
   1071 	  WM_T_82540,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1074 	  "Intel i82545EM 1000BASE-T Ethernet",
   1075 	  WM_T_82545,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1078 	  "Intel i82545GM 1000BASE-T Ethernet",
   1079 	  WM_T_82545_3,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1082 	  "Intel i82545GM 1000BASE-X Ethernet",
   1083 	  WM_T_82545_3,		WMP_F_FIBER },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1086 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1087 	  WM_T_82545_3,		WMP_F_SERDES },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1090 	  "Intel i82546EB 1000BASE-T Ethernet",
   1091 	  WM_T_82546,		WMP_F_COPPER },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1094 	  "Intel i82546EB 1000BASE-T Ethernet",
   1095 	  WM_T_82546,		WMP_F_COPPER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1098 	  "Intel i82545EM 1000BASE-X Ethernet",
   1099 	  WM_T_82545,		WMP_F_FIBER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1102 	  "Intel i82546EB 1000BASE-X Ethernet",
   1103 	  WM_T_82546,		WMP_F_FIBER },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1106 	  "Intel i82546GB 1000BASE-T Ethernet",
   1107 	  WM_T_82546_3,		WMP_F_COPPER },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1110 	  "Intel i82546GB 1000BASE-X Ethernet",
   1111 	  WM_T_82546_3,		WMP_F_FIBER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1114 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1115 	  WM_T_82546_3,		WMP_F_SERDES },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1118 	  "i82546GB quad-port Gigabit Ethernet",
   1119 	  WM_T_82546_3,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1122 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1123 	  WM_T_82546_3,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1126 	  "Intel PRO/1000MT (82546GB)",
   1127 	  WM_T_82546_3,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1130 	  "Intel i82541EI 1000BASE-T Ethernet",
   1131 	  WM_T_82541,		WMP_F_COPPER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1134 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1135 	  WM_T_82541,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1138 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1139 	  WM_T_82541,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1142 	  "Intel i82541ER 1000BASE-T Ethernet",
   1143 	  WM_T_82541_2,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1146 	  "Intel i82541GI 1000BASE-T Ethernet",
   1147 	  WM_T_82541_2,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1150 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1151 	  WM_T_82541_2,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1154 	  "Intel i82541PI 1000BASE-T Ethernet",
   1155 	  WM_T_82541_2,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1158 	  "Intel i82547EI 1000BASE-T Ethernet",
   1159 	  WM_T_82547,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1162 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1163 	  WM_T_82547,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1166 	  "Intel i82547GI 1000BASE-T Ethernet",
   1167 	  WM_T_82547_2,		WMP_F_COPPER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1170 	  "Intel PRO/1000 PT (82571EB)",
   1171 	  WM_T_82571,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1174 	  "Intel PRO/1000 PF (82571EB)",
   1175 	  WM_T_82571,		WMP_F_FIBER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1178 	  "Intel PRO/1000 PB (82571EB)",
   1179 	  WM_T_82571,		WMP_F_SERDES },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1182 	  "Intel PRO/1000 QT (82571EB)",
   1183 	  WM_T_82571,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1186 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1187 	  WM_T_82571,		WMP_F_COPPER, },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1190 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1191 	  WM_T_82571,		WMP_F_COPPER, },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1194 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1195 	  WM_T_82571,		WMP_F_SERDES, },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1198 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1199 	  WM_T_82571,		WMP_F_SERDES, },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1202 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1203 	  WM_T_82571,		WMP_F_FIBER, },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1206 	  "Intel i82572EI 1000baseT Ethernet",
   1207 	  WM_T_82572,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1210 	  "Intel i82572EI 1000baseX Ethernet",
   1211 	  WM_T_82572,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1214 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1215 	  WM_T_82572,		WMP_F_SERDES },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1218 	  "Intel i82572EI 1000baseT Ethernet",
   1219 	  WM_T_82572,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1222 	  "Intel i82573E",
   1223 	  WM_T_82573,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1226 	  "Intel i82573E IAMT",
   1227 	  WM_T_82573,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1230 	  "Intel i82573L Gigabit Ethernet",
   1231 	  WM_T_82573,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1234 	  "Intel i82574L",
   1235 	  WM_T_82574,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1238 	  "Intel i82574L",
   1239 	  WM_T_82574,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1242 	  "Intel i82583V",
   1243 	  WM_T_82583,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1246 	  "i80003 dual 1000baseT Ethernet",
   1247 	  WM_T_80003,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1250 	  "i80003 dual 1000baseX Ethernet",
   1251 	  WM_T_80003,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1254 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1255 	  WM_T_80003,		WMP_F_SERDES },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1258 	  "Intel i80003 1000baseT Ethernet",
   1259 	  WM_T_80003,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1262 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1263 	  WM_T_80003,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1266 	  "Intel i82801H (M_AMT) LAN Controller",
   1267 	  WM_T_ICH8,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1269 	  "Intel i82801H (AMT) LAN Controller",
   1270 	  WM_T_ICH8,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1272 	  "Intel i82801H LAN Controller",
   1273 	  WM_T_ICH8,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1275 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1276 	  WM_T_ICH8,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1278 	  "Intel i82801H (M) LAN Controller",
   1279 	  WM_T_ICH8,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1281 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1282 	  WM_T_ICH8,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1284 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1285 	  WM_T_ICH8,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1287 	  "82567V-3 LAN Controller",
   1288 	  WM_T_ICH8,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1290 	  "82801I (AMT) LAN Controller",
   1291 	  WM_T_ICH9,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1293 	  "82801I 10/100 LAN Controller",
   1294 	  WM_T_ICH9,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1296 	  "82801I (G) 10/100 LAN Controller",
   1297 	  WM_T_ICH9,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1299 	  "82801I (GT) 10/100 LAN Controller",
   1300 	  WM_T_ICH9,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1302 	  "82801I (C) LAN Controller",
   1303 	  WM_T_ICH9,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1305 	  "82801I mobile LAN Controller",
   1306 	  WM_T_ICH9,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1308 	  "82801I mobile (V) LAN Controller",
   1309 	  WM_T_ICH9,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1311 	  "82801I mobile (AMT) LAN Controller",
   1312 	  WM_T_ICH9,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1314 	  "82567LM-4 LAN Controller",
   1315 	  WM_T_ICH9,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1317 	  "82567LM-2 LAN Controller",
   1318 	  WM_T_ICH10,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1320 	  "82567LF-2 LAN Controller",
   1321 	  WM_T_ICH10,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1323 	  "82567LM-3 LAN Controller",
   1324 	  WM_T_ICH10,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1326 	  "82567LF-3 LAN Controller",
   1327 	  WM_T_ICH10,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1329 	  "82567V-2 LAN Controller",
   1330 	  WM_T_ICH10,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1332 	  "82567V-3? LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1335 	  "HANKSVILLE LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1338 	  "PCH LAN (82577LM) Controller",
   1339 	  WM_T_PCH,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1341 	  "PCH LAN (82577LC) Controller",
   1342 	  WM_T_PCH,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1344 	  "PCH LAN (82578DM) Controller",
   1345 	  WM_T_PCH,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1347 	  "PCH LAN (82578DC) Controller",
   1348 	  WM_T_PCH,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1350 	  "PCH2 LAN (82579LM) Controller",
   1351 	  WM_T_PCH2,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1353 	  "PCH2 LAN (82579V) Controller",
   1354 	  WM_T_PCH2,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1356 	  "82575EB dual-1000baseT Ethernet",
   1357 	  WM_T_82575,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1359 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1360 	  WM_T_82575,		WMP_F_SERDES },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1362 	  "82575GB quad-1000baseT Ethernet",
   1363 	  WM_T_82575,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1365 	  "82575GB quad-1000baseT Ethernet (PM)",
   1366 	  WM_T_82575,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1368 	  "82576 1000BaseT Ethernet",
   1369 	  WM_T_82576,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1371 	  "82576 1000BaseX Ethernet",
   1372 	  WM_T_82576,		WMP_F_FIBER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1375 	  "82576 gigabit Ethernet (SERDES)",
   1376 	  WM_T_82576,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1379 	  "82576 quad-1000BaseT Ethernet",
   1380 	  WM_T_82576,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1383 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1387 	  "82576 gigabit Ethernet",
   1388 	  WM_T_82576,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1391 	  "82576 gigabit Ethernet (SERDES)",
   1392 	  WM_T_82576,		WMP_F_SERDES },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1394 	  "82576 quad-gigabit Ethernet (SERDES)",
   1395 	  WM_T_82576,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1398 	  "82580 1000BaseT Ethernet",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1401 	  "82580 1000BaseX Ethernet",
   1402 	  WM_T_82580,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1405 	  "82580 1000BaseT Ethernet (SERDES)",
   1406 	  WM_T_82580,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1409 	  "82580 gigabit Ethernet (SGMII)",
   1410 	  WM_T_82580,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1412 	  "82580 dual-1000BaseT Ethernet",
   1413 	  WM_T_82580,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1416 	  "82580 quad-1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1420 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1424 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1425 	  WM_T_82580,		WMP_F_SERDES },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1428 	  "DH89XXCC 1000BASE-KX Ethernet",
   1429 	  WM_T_82580,		WMP_F_SERDES },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1432 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1436 	  "I350 Gigabit Network Connection",
   1437 	  WM_T_I350,		WMP_F_COPPER },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1440 	  "I350 Gigabit Fiber Network Connection",
   1441 	  WM_T_I350,		WMP_F_FIBER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1444 	  "I350 Gigabit Backplane Connection",
   1445 	  WM_T_I350,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1448 	  "I350 Quad Port Gigabit Ethernet",
   1449 	  WM_T_I350,		WMP_F_SERDES },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1452 	  "I350 Gigabit Connection",
   1453 	  WM_T_I350,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1456 	  "I354 Gigabit Ethernet (KX)",
   1457 	  WM_T_I354,		WMP_F_SERDES },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1460 	  "I354 Gigabit Ethernet (SGMII)",
   1461 	  WM_T_I354,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1464 	  "I354 Gigabit Ethernet (2.5G)",
   1465 	  WM_T_I354,		WMP_F_COPPER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1468 	  "I210-T1 Ethernet Server Adapter",
   1469 	  WM_T_I210,		WMP_F_COPPER },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1472 	  "I210 Ethernet (Copper OEM)",
   1473 	  WM_T_I210,		WMP_F_COPPER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1476 	  "I210 Ethernet (Copper IT)",
   1477 	  WM_T_I210,		WMP_F_COPPER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1480 	  "I210 Ethernet (Copper, FLASH less)",
   1481 	  WM_T_I210,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1484 	  "I210 Gigabit Ethernet (Fiber)",
   1485 	  WM_T_I210,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1488 	  "I210 Gigabit Ethernet (SERDES)",
   1489 	  WM_T_I210,		WMP_F_SERDES },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1492 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1493 	  WM_T_I210,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1496 	  "I210 Gigabit Ethernet (SGMII)",
   1497 	  WM_T_I210,		WMP_F_COPPER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1500 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1501 	  WM_T_I210,		WMP_F_COPPER },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1504 	  "I211 Ethernet (COPPER)",
   1505 	  WM_T_I211,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1507 	  "I217 V Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1510 	  "I217 LM Ethernet Connection",
   1511 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1513 	  "I218 V Ethernet Connection",
   1514 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1516 	  "I218 V Ethernet Connection",
   1517 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1519 	  "I218 V Ethernet Connection",
   1520 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1522 	  "I218 LM Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1525 	  "I218 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1528 	  "I218 LM Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1540 	  "I219 LM Ethernet Connection",
   1541 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1558 	  "I219 V Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1561 	  "I219 V Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1564 	  "I219 V Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1567 	  "I219 V Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1570 	  "I219 V Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1573 	  "I219 V Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1576 	  "I219 V Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1579 	  "I219 V Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
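	/* Sentinel entry: wm_lookup() stops at the first NULL name. */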
   1581 	{ 0,			0,
   1582 	  NULL,
   1583 	  0,			0 },
   1584 };
   1585 
   1586 /*
    1587  * Register read/write functions,
    1588  * other than CSR_{READ|WRITE}().
   1589  */
   1590 
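/*
 * Both helpers below drive the indirect I/O window exposed through the
 * I/O BAR: the target register offset is written at byte offset 0 (the
 * IOADDR register) and the data is then transferred through byte offset
 * 4 (IODATA). A minimal sketch of a read-modify-write built on the pair
 * (wm_io_read() is compiled out below, and "some_bit" is a hypothetical
 * flag, so this is illustrative only):
 *
 *	uint32_t v = wm_io_read(sc, reg);
 *	wm_io_write(sc, reg, v | some_bit);
 */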
   1591 #if 0 /* Not currently used */
   1592 static inline uint32_t
   1593 wm_io_read(struct wm_softc *sc, int reg)
   1594 {
   1595 
   1596 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1597 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1598 }
   1599 #endif
   1600 
   1601 static inline void
   1602 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1603 {
   1604 
   1605 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1607 }
   1608 
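/*
 * Write one byte of data into an 8-bit indirect controller register
 * (e.g. the 82575 SCTL block): the data and the target offset are
 * packed into a single register write, after which the register is
 * polled every 5us until the hardware reports SCTL_CTL_READY or
 * SCTL_CTL_POLL_TIMEOUT iterations elapse.
 */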
   1609 static inline void
   1610 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1611     uint32_t data)
   1612 {
   1613 	uint32_t regval;
   1614 	int i;
   1615 
   1616 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1617 
   1618 	CSR_WRITE(sc, reg, regval);
   1619 
   1620 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1621 		delay(5);
   1622 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1623 			break;
   1624 	}
   1625 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1626 		aprint_error("%s: WARNING:"
   1627 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1628 		    device_xname(sc->sc_dev), reg);
   1629 	}
   1630 }
   1631 
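/*
 * Store a bus address into the two little-endian 32-bit halves of a
 * wiseman_addr_t. The (uint64_t) cast keeps the shift well-defined when
 * bus_addr_t is 64-bit; with an illustrative v of 0x123456789, wa_low
 * becomes htole32(0x23456789) and wa_high becomes htole32(0x1).
 */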
   1632 static inline void
   1633 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1634 {
   1635 	wa->wa_low = htole32(v & 0xffffffffU);
   1636 	if (sizeof(bus_addr_t) == 8)
   1637 		wa->wa_high = htole32((uint64_t) v >> 32);
   1638 	else
   1639 		wa->wa_high = 0;
   1640 }
   1641 
   1642 /*
   1643  * Descriptor sync/init functions.
   1644  */
   1645 static inline void
   1646 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1647 {
   1648 	struct wm_softc *sc = txq->txq_sc;
   1649 
   1650 	/* If it will wrap around, sync to the end of the ring. */
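	/*
	 * (Worked example with illustrative numbers: on a 256-descriptor
	 * ring, a call with start = 250 and num = 10 syncs descriptors
	 * 250..255 here, leaving num = 4 and start = 0 so that
	 * descriptors 0..3 are synced below.)
	 */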
   1651 	if ((start + num) > WM_NTXDESC(txq)) {
   1652 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1653 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1654 		    (WM_NTXDESC(txq) - start), ops);
   1655 		num -= (WM_NTXDESC(txq) - start);
   1656 		start = 0;
   1657 	}
   1658 
   1659 	/* Now sync whatever is left. */
   1660 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1661 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1662 }
   1663 
   1664 static inline void
   1665 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1666 {
   1667 	struct wm_softc *sc = rxq->rxq_sc;
   1668 
   1669 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1670 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1671 }
   1672 
   1673 static inline void
   1674 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1675 {
   1676 	struct wm_softc *sc = rxq->rxq_sc;
   1677 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1678 	struct mbuf *m = rxs->rxs_mbuf;
   1679 
   1680 	/*
   1681 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1682 	 * so that the payload after the Ethernet header is aligned
   1683 	 * to a 4-byte boundary.
    1684 	 *
   1685 	 * XXX BRAINDAMAGE ALERT!
   1686 	 * The stupid chip uses the same size for every buffer, which
   1687 	 * is set in the Receive Control register.  We are using the 2K
   1688 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1689 	 * reason, we can't "scoot" packets longer than the standard
   1690 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1691 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1692 	 * the upper layer copy the headers.
   1693 	 */
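	/*
	 * (The 2-byte tweak works because an Ethernet header is 14 bytes
	 * long: starting the frame at offset 2 places the IP header at
	 * offset 16, a 4-byte boundary.)
	 */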
   1694 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1695 
   1696 	if (sc->sc_type == WM_T_82574) {
   1697 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1698 		rxd->erx_data.erxd_addr =
   1699 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1700 		rxd->erx_data.erxd_dd = 0;
   1701 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1702 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1703 
   1704 		rxd->nqrx_data.nrxd_paddr =
   1705 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1706 		/* Currently, split header is not supported. */
   1707 		rxd->nqrx_data.nrxd_haddr = 0;
   1708 	} else {
   1709 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1710 
   1711 		wm_set_dma_addr(&rxd->wrx_addr,
   1712 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1713 		rxd->wrx_len = 0;
   1714 		rxd->wrx_cksum = 0;
   1715 		rxd->wrx_status = 0;
   1716 		rxd->wrx_errors = 0;
   1717 		rxd->wrx_special = 0;
   1718 	}
   1719 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1720 
   1721 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1722 }
   1723 
   1724 /*
   1725  * Device driver interface functions and commonly used functions.
   1726  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1727  */
   1728 
   1729 /* Lookup supported device table */
   1730 static const struct wm_product *
   1731 wm_lookup(const struct pci_attach_args *pa)
   1732 {
   1733 	const struct wm_product *wmp;
   1734 
   1735 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1736 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1737 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1738 			return wmp;
   1739 	}
   1740 	return NULL;
   1741 }
   1742 
   1743 /* The match function (ca_match) */
   1744 static int
   1745 wm_match(device_t parent, cfdata_t cf, void *aux)
   1746 {
   1747 	struct pci_attach_args *pa = aux;
   1748 
   1749 	if (wm_lookup(pa) != NULL)
   1750 		return 1;
   1751 
   1752 	return 0;
   1753 }
   1754 
   1755 /* The attach function (ca_attach) */
   1756 static void
   1757 wm_attach(device_t parent, device_t self, void *aux)
   1758 {
   1759 	struct wm_softc *sc = device_private(self);
   1760 	struct pci_attach_args *pa = aux;
   1761 	prop_dictionary_t dict;
   1762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1763 	pci_chipset_tag_t pc = pa->pa_pc;
   1764 	int counts[PCI_INTR_TYPE_SIZE];
   1765 	pci_intr_type_t max_type;
   1766 	const char *eetype, *xname;
   1767 	bus_space_tag_t memt;
   1768 	bus_space_handle_t memh;
   1769 	bus_size_t memsize;
   1770 	int memh_valid;
   1771 	int i, error;
   1772 	const struct wm_product *wmp;
   1773 	prop_data_t ea;
   1774 	prop_number_t pn;
   1775 	uint8_t enaddr[ETHER_ADDR_LEN];
   1776 	char buf[256];
   1777 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1778 	pcireg_t preg, memtype;
   1779 	uint16_t eeprom_data, apme_mask;
   1780 	bool force_clear_smbi;
   1781 	uint32_t link_mode;
   1782 	uint32_t reg;
   1783 
   1784 	sc->sc_dev = self;
   1785 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1786 	sc->sc_core_stopping = false;
   1787 
   1788 	wmp = wm_lookup(pa);
   1789 #ifdef DIAGNOSTIC
   1790 	if (wmp == NULL) {
   1791 		printf("\n");
   1792 		panic("wm_attach: impossible");
   1793 	}
   1794 #endif
   1795 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1796 
   1797 	sc->sc_pc = pa->pa_pc;
   1798 	sc->sc_pcitag = pa->pa_tag;
   1799 
   1800 	if (pci_dma64_available(pa))
   1801 		sc->sc_dmat = pa->pa_dmat64;
   1802 	else
   1803 		sc->sc_dmat = pa->pa_dmat;
   1804 
   1805 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1806 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1807 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1808 
   1809 	sc->sc_type = wmp->wmp_type;
   1810 
   1811 	/* Set default function pointers */
   1812 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1813 	sc->phy.release = sc->nvm.release = wm_put_null;
   1814 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1815 
   1816 	if (sc->sc_type < WM_T_82543) {
   1817 		if (sc->sc_rev < 2) {
   1818 			aprint_error_dev(sc->sc_dev,
   1819 			    "i82542 must be at least rev. 2\n");
   1820 			return;
   1821 		}
   1822 		if (sc->sc_rev < 3)
   1823 			sc->sc_type = WM_T_82542_2_0;
   1824 	}
   1825 
   1826 	/*
   1827 	 * Disable MSI for Errata:
   1828 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1829 	 *
   1830 	 *  82544: Errata 25
   1831 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1832 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1833 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1834 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1835 	 *
   1836 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1837 	 *
   1838 	 *  82571 & 82572: Errata 63
   1839 	 */
   1840 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1841 	    || (sc->sc_type == WM_T_82572))
   1842 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1843 
   1844 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1845 	    || (sc->sc_type == WM_T_82580)
   1846 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1847 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1848 		sc->sc_flags |= WM_F_NEWQUEUE;
   1849 
   1850 	/* Set device properties (mactype) */
   1851 	dict = device_properties(sc->sc_dev);
   1852 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1853 
   1854 	/*
    1855 	 * Map the device.  All devices support memory-mapped access,
   1856 	 * and it is really required for normal operation.
   1857 	 */
   1858 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1859 	switch (memtype) {
   1860 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1862 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1863 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1864 		break;
   1865 	default:
   1866 		memh_valid = 0;
   1867 		break;
   1868 	}
   1869 
   1870 	if (memh_valid) {
   1871 		sc->sc_st = memt;
   1872 		sc->sc_sh = memh;
   1873 		sc->sc_ss = memsize;
   1874 	} else {
   1875 		aprint_error_dev(sc->sc_dev,
   1876 		    "unable to map device registers\n");
   1877 		return;
   1878 	}
   1879 
   1880 	/*
   1881 	 * In addition, i82544 and later support I/O mapped indirect
   1882 	 * register access.  It is not desirable (nor supported in
   1883 	 * this driver) to use it for normal operation, though it is
   1884 	 * required to work around bugs in some chip versions.
   1885 	 */
   1886 	if (sc->sc_type >= WM_T_82544) {
   1887 		/* First we have to find the I/O BAR. */
   1888 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1889 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1890 			if (memtype == PCI_MAPREG_TYPE_IO)
   1891 				break;
   1892 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1893 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1894 				i += 4;	/* skip high bits, too */
   1895 		}
   1896 		if (i < PCI_MAPREG_END) {
   1897 			/*
    1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1900 			 * That's not a problem because those newer chips
    1901 			 * don't have this bug.
   1902 			 *
   1903 			 * The i8254x doesn't apparently respond when the
   1904 			 * I/O BAR is 0, which looks somewhat like it's not
   1905 			 * been configured.
   1906 			 */
   1907 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1908 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "WARNING: I/O BAR at zero.\n");
   1911 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1912 					0, &sc->sc_iot, &sc->sc_ioh,
   1913 					NULL, &sc->sc_ios) == 0) {
   1914 				sc->sc_flags |= WM_F_IOH_VALID;
   1915 			} else
   1916 				aprint_error_dev(sc->sc_dev,
   1917 				    "WARNING: unable to map I/O space\n");
   1918 		}
   1919 
   1920 	}
   1921 
   1922 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1923 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1924 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1925 	if (sc->sc_type < WM_T_82542_2_1)
   1926 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1927 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1928 
   1929 	/* Power up chip */
   1930 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1931 	    && error != EOPNOTSUPP) {
   1932 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1933 		return;
   1934 	}
   1935 
   1936 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1937 	/*
    1938 	 * Don't use MSI-X if we can use only one queue, in order to
    1939 	 * save interrupt resources.
   1940 	 */
   1941 	if (sc->sc_nqueues > 1) {
   1942 		max_type = PCI_INTR_TYPE_MSIX;
   1943 		/*
    1944 		 * The 82583 has an MSI-X capability in the PCI configuration
    1945 		 * space, but the chip doesn't actually support it. At least,
    1946 		 * the documentation says nothing about MSI-X.
   1947 		 */
   1948 		counts[PCI_INTR_TYPE_MSIX]
   1949 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1950 	} else {
   1951 		max_type = PCI_INTR_TYPE_MSI;
   1952 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1953 	}
   1954 
   1955 	/* Allocation settings */
   1956 	counts[PCI_INTR_TYPE_MSI] = 1;
   1957 	counts[PCI_INTR_TYPE_INTX] = 1;
   1958 	/* overridden by disable flags */
   1959 	if (wm_disable_msi != 0) {
   1960 		counts[PCI_INTR_TYPE_MSI] = 0;
   1961 		if (wm_disable_msix != 0) {
   1962 			max_type = PCI_INTR_TYPE_INTX;
   1963 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1964 		}
   1965 	} else if (wm_disable_msix != 0) {
   1966 		max_type = PCI_INTR_TYPE_MSI;
   1967 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1968 	}
   1969 
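	/*
	 * The allocation below falls back MSI-X -> MSI -> INTx: if MSI-X
	 * vector setup fails, the vectors are released and allocation is
	 * retried with MSI; if MSI setup fails, it is retried once more
	 * with INTx.
	 */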
   1970 alloc_retry:
   1971 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1972 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1973 		return;
   1974 	}
   1975 
   1976 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1977 		error = wm_setup_msix(sc);
   1978 		if (error) {
   1979 			pci_intr_release(pc, sc->sc_intrs,
   1980 			    counts[PCI_INTR_TYPE_MSIX]);
   1981 
   1982 			/* Setup for MSI: Disable MSI-X */
   1983 			max_type = PCI_INTR_TYPE_MSI;
   1984 			counts[PCI_INTR_TYPE_MSI] = 1;
   1985 			counts[PCI_INTR_TYPE_INTX] = 1;
   1986 			goto alloc_retry;
   1987 		}
   1988 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1989 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1990 		error = wm_setup_legacy(sc);
   1991 		if (error) {
   1992 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1993 			    counts[PCI_INTR_TYPE_MSI]);
   1994 
   1995 			/* The next try is for INTx: Disable MSI */
   1996 			max_type = PCI_INTR_TYPE_INTX;
   1997 			counts[PCI_INTR_TYPE_INTX] = 1;
   1998 			goto alloc_retry;
   1999 		}
   2000 	} else {
   2001 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2002 		error = wm_setup_legacy(sc);
   2003 		if (error) {
   2004 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2005 			    counts[PCI_INTR_TYPE_INTX]);
   2006 			return;
   2007 		}
   2008 	}
   2009 
   2010 	/*
   2011 	 * Check the function ID (unit number of the chip).
   2012 	 */
   2013 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2014 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2015 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2016 	    || (sc->sc_type == WM_T_82580)
   2017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2018 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2019 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2020 	else
   2021 		sc->sc_funcid = 0;
   2022 
   2023 	/*
   2024 	 * Determine a few things about the bus we're connected to.
   2025 	 */
   2026 	if (sc->sc_type < WM_T_82543) {
   2027 		/* We don't really know the bus characteristics here. */
   2028 		sc->sc_bus_speed = 33;
   2029 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2030 		/*
   2031 		 * CSA (Communication Streaming Architecture) is about as fast
    2032 		 * as a 32-bit 66MHz PCI bus.
   2033 		 */
   2034 		sc->sc_flags |= WM_F_CSA;
   2035 		sc->sc_bus_speed = 66;
   2036 		aprint_verbose_dev(sc->sc_dev,
   2037 		    "Communication Streaming Architecture\n");
   2038 		if (sc->sc_type == WM_T_82547) {
   2039 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2040 			callout_setfunc(&sc->sc_txfifo_ch,
   2041 			    wm_82547_txfifo_stall, sc);
   2042 			aprint_verbose_dev(sc->sc_dev,
   2043 			    "using 82547 Tx FIFO stall work-around\n");
   2044 		}
   2045 	} else if (sc->sc_type >= WM_T_82571) {
   2046 		sc->sc_flags |= WM_F_PCIE;
   2047 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2048 		    && (sc->sc_type != WM_T_ICH10)
   2049 		    && (sc->sc_type != WM_T_PCH)
   2050 		    && (sc->sc_type != WM_T_PCH2)
   2051 		    && (sc->sc_type != WM_T_PCH_LPT)
   2052 		    && (sc->sc_type != WM_T_PCH_SPT)
   2053 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2054 			/* ICH* and PCH* have no PCIe capability registers */
   2055 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2056 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2057 				NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIe capability\n");
   2060 		}
   2061 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2062 	} else {
   2063 		reg = CSR_READ(sc, WMREG_STATUS);
   2064 		if (reg & STATUS_BUS64)
   2065 			sc->sc_flags |= WM_F_BUS64;
   2066 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2067 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2068 
   2069 			sc->sc_flags |= WM_F_PCIX;
   2070 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2071 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2072 				aprint_error_dev(sc->sc_dev,
   2073 				    "unable to find PCIX capability\n");
   2074 			else if (sc->sc_type != WM_T_82545_3 &&
   2075 				 sc->sc_type != WM_T_82546_3) {
   2076 				/*
   2077 				 * Work around a problem caused by the BIOS
   2078 				 * setting the max memory read byte count
   2079 				 * incorrectly.
   2080 				 */
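				/*
				 * (Illustrative example: both fields encode
				 * 512 << n bytes, so bytecnt = 3 (4096) with
				 * maxb = 2 (2048) rewrites MMRBC down to
				 * 2048 bytes.)
				 */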
   2081 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2082 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2083 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2084 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2085 
   2086 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2087 				    PCIX_CMD_BYTECNT_SHIFT;
   2088 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2089 				    PCIX_STATUS_MAXB_SHIFT;
   2090 				if (bytecnt > maxb) {
   2091 					aprint_verbose_dev(sc->sc_dev,
   2092 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2093 					    512 << bytecnt, 512 << maxb);
   2094 					pcix_cmd = (pcix_cmd &
   2095 					    ~PCIX_CMD_BYTECNT_MASK) |
   2096 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2097 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2098 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2099 					    pcix_cmd);
   2100 				}
   2101 			}
   2102 		}
   2103 		/*
   2104 		 * The quad port adapter is special; it has a PCIX-PCIX
   2105 		 * bridge on the board, and can run the secondary bus at
   2106 		 * a higher speed.
   2107 		 */
   2108 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2109 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2110 								      : 66;
   2111 		} else if (sc->sc_flags & WM_F_PCIX) {
   2112 			switch (reg & STATUS_PCIXSPD_MASK) {
   2113 			case STATUS_PCIXSPD_50_66:
   2114 				sc->sc_bus_speed = 66;
   2115 				break;
   2116 			case STATUS_PCIXSPD_66_100:
   2117 				sc->sc_bus_speed = 100;
   2118 				break;
   2119 			case STATUS_PCIXSPD_100_133:
   2120 				sc->sc_bus_speed = 133;
   2121 				break;
   2122 			default:
   2123 				aprint_error_dev(sc->sc_dev,
   2124 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2125 				    reg & STATUS_PCIXSPD_MASK);
   2126 				sc->sc_bus_speed = 66;
   2127 				break;
   2128 			}
   2129 		} else
   2130 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2131 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2132 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2133 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2134 	}
   2135 
   2136 	/* clear interesting stat counters */
   2137 	CSR_READ(sc, WMREG_COLC);
   2138 	CSR_READ(sc, WMREG_RXERRC);
   2139 
   2140 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2141 	    || (sc->sc_type >= WM_T_ICH8))
   2142 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2143 	if (sc->sc_type >= WM_T_ICH8)
   2144 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2145 
   2146 	/* Set PHY, NVM mutex related stuff */
   2147 	switch (sc->sc_type) {
   2148 	case WM_T_82542_2_0:
   2149 	case WM_T_82542_2_1:
   2150 	case WM_T_82543:
   2151 	case WM_T_82544:
   2152 		/* Microwire */
   2153 		sc->nvm.read = wm_nvm_read_uwire;
   2154 		sc->sc_nvm_wordsize = 64;
   2155 		sc->sc_nvm_addrbits = 6;
   2156 		break;
   2157 	case WM_T_82540:
   2158 	case WM_T_82545:
   2159 	case WM_T_82545_3:
   2160 	case WM_T_82546:
   2161 	case WM_T_82546_3:
   2162 		/* Microwire */
   2163 		sc->nvm.read = wm_nvm_read_uwire;
   2164 		reg = CSR_READ(sc, WMREG_EECD);
   2165 		if (reg & EECD_EE_SIZE) {
   2166 			sc->sc_nvm_wordsize = 256;
   2167 			sc->sc_nvm_addrbits = 8;
   2168 		} else {
   2169 			sc->sc_nvm_wordsize = 64;
   2170 			sc->sc_nvm_addrbits = 6;
   2171 		}
   2172 		sc->sc_flags |= WM_F_LOCK_EECD;
   2173 		sc->nvm.acquire = wm_get_eecd;
   2174 		sc->nvm.release = wm_put_eecd;
   2175 		break;
   2176 	case WM_T_82541:
   2177 	case WM_T_82541_2:
   2178 	case WM_T_82547:
   2179 	case WM_T_82547_2:
   2180 		reg = CSR_READ(sc, WMREG_EECD);
   2181 		/*
    2182 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on
    2183 		 * 8254[17], so set the flags and functions before calling it.
   2184 		 */
   2185 		sc->sc_flags |= WM_F_LOCK_EECD;
   2186 		sc->nvm.acquire = wm_get_eecd;
   2187 		sc->nvm.release = wm_put_eecd;
   2188 		if (reg & EECD_EE_TYPE) {
   2189 			/* SPI */
   2190 			sc->nvm.read = wm_nvm_read_spi;
   2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2192 			wm_nvm_set_addrbits_size_eecd(sc);
   2193 		} else {
   2194 			/* Microwire */
   2195 			sc->nvm.read = wm_nvm_read_uwire;
   2196 			if ((reg & EECD_EE_ABITS) != 0) {
   2197 				sc->sc_nvm_wordsize = 256;
   2198 				sc->sc_nvm_addrbits = 8;
   2199 			} else {
   2200 				sc->sc_nvm_wordsize = 64;
   2201 				sc->sc_nvm_addrbits = 6;
   2202 			}
   2203 		}
   2204 		break;
   2205 	case WM_T_82571:
   2206 	case WM_T_82572:
   2207 		/* SPI */
   2208 		sc->nvm.read = wm_nvm_read_eerd;
    2209 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2211 		wm_nvm_set_addrbits_size_eecd(sc);
   2212 		sc->phy.acquire = wm_get_swsm_semaphore;
   2213 		sc->phy.release = wm_put_swsm_semaphore;
   2214 		sc->nvm.acquire = wm_get_nvm_82571;
   2215 		sc->nvm.release = wm_put_nvm_82571;
   2216 		break;
   2217 	case WM_T_82573:
   2218 	case WM_T_82574:
   2219 	case WM_T_82583:
   2220 		sc->nvm.read = wm_nvm_read_eerd;
    2221 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2222 		if (sc->sc_type == WM_T_82573) {
   2223 			sc->phy.acquire = wm_get_swsm_semaphore;
   2224 			sc->phy.release = wm_put_swsm_semaphore;
   2225 			sc->nvm.acquire = wm_get_nvm_82571;
   2226 			sc->nvm.release = wm_put_nvm_82571;
   2227 		} else {
   2228 			/* Both PHY and NVM use the same semaphore. */
   2229 			sc->phy.acquire = sc->nvm.acquire
   2230 			    = wm_get_swfwhw_semaphore;
   2231 			sc->phy.release = sc->nvm.release
   2232 			    = wm_put_swfwhw_semaphore;
   2233 		}
   2234 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2235 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 			sc->sc_nvm_wordsize = 2048;
   2237 		} else {
   2238 			/* SPI */
   2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2240 			wm_nvm_set_addrbits_size_eecd(sc);
   2241 		}
   2242 		break;
   2243 	case WM_T_82575:
   2244 	case WM_T_82576:
   2245 	case WM_T_82580:
   2246 	case WM_T_I350:
   2247 	case WM_T_I354:
   2248 	case WM_T_80003:
   2249 		/* SPI */
   2250 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 		wm_nvm_set_addrbits_size_eecd(sc);
   2252 		if ((sc->sc_type == WM_T_80003)
   2253 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2254 			sc->nvm.read = wm_nvm_read_eerd;
   2255 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2256 		} else {
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_LOCK_EECD;
   2259 		}
   2260 		sc->phy.acquire = wm_get_phy_82575;
   2261 		sc->phy.release = wm_put_phy_82575;
   2262 		sc->nvm.acquire = wm_get_nvm_80003;
   2263 		sc->nvm.release = wm_put_nvm_80003;
   2264 		break;
   2265 	case WM_T_ICH8:
   2266 	case WM_T_ICH9:
   2267 	case WM_T_ICH10:
   2268 	case WM_T_PCH:
   2269 	case WM_T_PCH2:
   2270 	case WM_T_PCH_LPT:
   2271 		sc->nvm.read = wm_nvm_read_ich8;
   2272 		/* FLASH */
   2273 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2274 		sc->sc_nvm_wordsize = 2048;
   2275 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2276 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2277 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2278 			aprint_error_dev(sc->sc_dev,
   2279 			    "can't map FLASH registers\n");
   2280 			goto out;
   2281 		}
   2282 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2283 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2284 		    ICH_FLASH_SECTOR_SIZE;
   2285 		sc->sc_ich8_flash_bank_size =
   2286 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2287 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2288 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2289 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
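		/*
		 * (Illustrative: GFPREG carries the first and last 4 KB flash
		 * sectors of the NVM region in its low and high halves. Base
		 * sector 0 with limit sector 31 gives 32 sectors, i.e. 128 KB
		 * covering the two banks, and dividing by 2 * sizeof(uint16_t)
		 * yields 32768 16-bit words per bank.)
		 */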
   2290 		sc->sc_flashreg_offset = 0;
   2291 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2292 		sc->phy.release = wm_put_swflag_ich8lan;
   2293 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2294 		sc->nvm.release = wm_put_nvm_ich8lan;
   2295 		break;
   2296 	case WM_T_PCH_SPT:
   2297 	case WM_T_PCH_CNP:
   2298 		sc->nvm.read = wm_nvm_read_spt;
   2299 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2300 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2301 		sc->sc_flasht = sc->sc_st;
   2302 		sc->sc_flashh = sc->sc_sh;
   2303 		sc->sc_ich8_flash_base = 0;
   2304 		sc->sc_nvm_wordsize =
   2305 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2306 		    * NVM_SIZE_MULTIPLIER;
    2307 		/* That is the size in bytes; we want words */
   2308 		sc->sc_nvm_wordsize /= 2;
   2309 		/* Assume 2 banks */
   2310 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
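		/*
		 * (Illustrative, assuming NVM_SIZE_MULTIPLIER is 4096: a
		 * STRAP size field of 7 yields (7 + 1) * 4096 = 32 KB of
		 * flash, i.e. 16384 words in total and 8192 words per bank.)
		 */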
   2311 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2312 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2313 		sc->phy.release = wm_put_swflag_ich8lan;
   2314 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2315 		sc->nvm.release = wm_put_nvm_ich8lan;
   2316 		break;
   2317 	case WM_T_I210:
   2318 	case WM_T_I211:
    2319 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2320 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2321 		if (wm_nvm_flash_presence_i210(sc)) {
   2322 			sc->nvm.read = wm_nvm_read_eerd;
   2323 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2324 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2325 			wm_nvm_set_addrbits_size_eecd(sc);
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_invm;
   2328 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2329 			sc->sc_nvm_wordsize = INVM_SIZE;
   2330 		}
   2331 		sc->phy.acquire = wm_get_phy_82575;
   2332 		sc->phy.release = wm_put_phy_82575;
   2333 		sc->nvm.acquire = wm_get_nvm_80003;
   2334 		sc->nvm.release = wm_put_nvm_80003;
   2335 		break;
   2336 	default:
   2337 		break;
   2338 	}
   2339 
   2340 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82571:
   2343 	case WM_T_82572:
   2344 		reg = CSR_READ(sc, WMREG_SWSM2);
   2345 		if ((reg & SWSM2_LOCK) == 0) {
   2346 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2347 			force_clear_smbi = true;
   2348 		} else
   2349 			force_clear_smbi = false;
   2350 		break;
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 		force_clear_smbi = true;
   2355 		break;
   2356 	default:
   2357 		force_clear_smbi = false;
   2358 		break;
   2359 	}
   2360 	if (force_clear_smbi) {
   2361 		reg = CSR_READ(sc, WMREG_SWSM);
   2362 		if ((reg & SWSM_SMBI) != 0)
   2363 			aprint_error_dev(sc->sc_dev,
   2364 			    "Please update the Bootagent\n");
   2365 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2366 	}
   2367 
   2368 	/*
    2369 	 * Defer printing the EEPROM type until after verifying the checksum.
   2370 	 * This allows the EEPROM type to be printed correctly in the case
   2371 	 * that no EEPROM is attached.
   2372 	 */
   2373 	/*
   2374 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2375 	 * this for later, so we can fail future reads from the EEPROM.
   2376 	 */
   2377 	if (wm_nvm_validate_checksum(sc)) {
   2378 		/*
    2379 		 * Read it again because some PCI-e parts fail the
    2380 		 * first check due to the link being in a sleep state.
   2381 		 */
   2382 		if (wm_nvm_validate_checksum(sc))
   2383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2384 	}
   2385 
   2386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2388 	else {
   2389 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2390 		    sc->sc_nvm_wordsize);
   2391 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2392 			aprint_verbose("iNVM");
   2393 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2394 			aprint_verbose("FLASH(HW)");
   2395 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2396 			aprint_verbose("FLASH");
   2397 		else {
   2398 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2399 				eetype = "SPI";
   2400 			else
   2401 				eetype = "MicroWire";
   2402 			aprint_verbose("(%d address bits) %s EEPROM",
   2403 			    sc->sc_nvm_addrbits, eetype);
   2404 		}
   2405 	}
   2406 	wm_nvm_version(sc);
   2407 	aprint_verbose("\n");
   2408 
   2409 	/*
   2410 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2411 	 * incorrect.
   2412 	 */
   2413 	wm_gmii_setup_phytype(sc, 0, 0);
   2414 
   2415 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2416 	switch (sc->sc_type) {
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 	case WM_T_PCH_CNP:
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		if ((eeprom_data & apme_mask) != 0)
   2428 			sc->sc_flags |= WM_F_WOL;
   2429 		break;
   2430 	default:
   2431 		break;
   2432 	}
   2433 
   2434 	/* Reset the chip to a known state. */
   2435 	wm_reset(sc);
   2436 
   2437 	/*
   2438 	 * Check for I21[01] PLL workaround.
   2439 	 *
   2440 	 * Three cases:
   2441 	 * a) Chip is I211.
   2442 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2443 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2444 	 */
   2445 	if (sc->sc_type == WM_T_I211)
   2446 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2447 	if (sc->sc_type == WM_T_I210) {
   2448 		if (!wm_nvm_flash_presence_i210(sc))
   2449 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2450 		else if ((sc->sc_nvm_ver_major < 3)
   2451 		    || ((sc->sc_nvm_ver_major == 3)
   2452 			&& (sc->sc_nvm_ver_minor < 25))) {
   2453 			aprint_verbose_dev(sc->sc_dev,
   2454 			    "ROM image version %d.%d is older than 3.25\n",
   2455 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2456 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2457 		}
   2458 	}
   2459 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2460 		wm_pll_workaround_i210(sc);
   2461 
   2462 	wm_get_wakeup(sc);
   2463 
   2464 	/* Non-AMT based hardware can now take control from firmware */
   2465 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2466 		wm_get_hw_control(sc);
   2467 
   2468 	/*
   2469 	 * Read the Ethernet address from the EEPROM, if not first found
   2470 	 * in device properties.
   2471 	 */
   2472 	ea = prop_dictionary_get(dict, "mac-address");
   2473 	if (ea != NULL) {
   2474 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2475 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2476 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2477 	} else {
   2478 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "unable to read Ethernet address\n");
   2481 			goto out;
   2482 		}
   2483 	}
   2484 
   2485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2486 	    ether_sprintf(enaddr));
   2487 
   2488 	/*
   2489 	 * Read the config info from the EEPROM, and set up various
   2490 	 * bits in the control registers based on their contents.
   2491 	 */
   2492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2493 	if (pn != NULL) {
   2494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2496 	} else {
   2497 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2499 			goto out;
   2500 		}
   2501 	}
   2502 
   2503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2504 	if (pn != NULL) {
   2505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2507 	} else {
   2508 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2510 			goto out;
   2511 		}
   2512 	}
   2513 
   2514 	/* check for WM_F_WOL */
   2515 	switch (sc->sc_type) {
   2516 	case WM_T_82542_2_0:
   2517 	case WM_T_82542_2_1:
   2518 	case WM_T_82543:
   2519 		/* dummy? */
   2520 		eeprom_data = 0;
   2521 		apme_mask = NVM_CFG3_APME;
   2522 		break;
   2523 	case WM_T_82544:
   2524 		apme_mask = NVM_CFG2_82544_APM_EN;
   2525 		eeprom_data = cfg2;
   2526 		break;
   2527 	case WM_T_82546:
   2528 	case WM_T_82546_3:
   2529 	case WM_T_82571:
   2530 	case WM_T_82572:
   2531 	case WM_T_82573:
   2532 	case WM_T_82574:
   2533 	case WM_T_82583:
   2534 	case WM_T_80003:
   2535 	case WM_T_82575:
   2536 	case WM_T_82576:
   2537 		apme_mask = NVM_CFG3_APME;
   2538 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2539 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2540 		break;
   2541 	case WM_T_82580:
   2542 	case WM_T_I350:
   2543 	case WM_T_I354:
   2544 	case WM_T_I210:
   2545 	case WM_T_I211:
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc,
   2548 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2549 		    1, &eeprom_data);
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
    2559 		/* Already checked before wm_reset() */
   2560 		apme_mask = eeprom_data = 0;
   2561 		break;
   2562 	default: /* XXX 82540 */
   2563 		apme_mask = NVM_CFG3_APME;
   2564 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2565 		break;
   2566 	}
   2567 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2568 	if ((eeprom_data & apme_mask) != 0)
   2569 		sc->sc_flags |= WM_F_WOL;
   2570 
   2571 	/*
   2572 	 * We have the eeprom settings, now apply the special cases
   2573 	 * where the eeprom may be wrong or the board won't support
    2574 	 * wake on LAN on a particular port.
   2575 	 */
   2576 	switch (sc->sc_pcidevid) {
   2577 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2578 		sc->sc_flags &= ~WM_F_WOL;
   2579 		break;
   2580 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2581 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2582 		/* Wake events only supported on port A for dual fiber
   2583 		 * regardless of eeprom setting */
   2584 		if (sc->sc_funcid == 1)
   2585 			sc->sc_flags &= ~WM_F_WOL;
   2586 		break;
   2587 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2588 		/* If quad port adapter, disable WoL on all but port A */
   2589 		if (sc->sc_funcid != 0)
   2590 			sc->sc_flags &= ~WM_F_WOL;
   2591 		break;
   2592 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2593 		/* Wake events only supported on port A for dual fiber
   2594 		 * regardless of eeprom setting */
   2595 		if (sc->sc_funcid == 1)
   2596 			sc->sc_flags &= ~WM_F_WOL;
   2597 		break;
   2598 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2600 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2601 		/* If quad port adapter, disable WoL on all but port A */
   2602 		if (sc->sc_funcid != 0)
   2603 			sc->sc_flags &= ~WM_F_WOL;
   2604 		break;
   2605 	}
   2606 
   2607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2608 		/* Check NVM for autonegotiation */
   2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2610 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2611 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2612 		}
   2613 	}
   2614 
   2615 	/*
    2616 	 * XXX Need special handling for some multiple-port cards
    2617 	 * to disable a particular port.
   2618 	 */
   2619 
   2620 	if (sc->sc_type >= WM_T_82544) {
   2621 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2622 		if (pn != NULL) {
   2623 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2624 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2625 		} else {
   2626 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2627 				aprint_error_dev(sc->sc_dev,
   2628 				    "unable to read SWDPIN\n");
   2629 				goto out;
   2630 			}
   2631 		}
   2632 	}
   2633 
   2634 	if (cfg1 & NVM_CFG1_ILOS)
   2635 		sc->sc_ctrl |= CTRL_ILOS;
   2636 
   2637 	/*
   2638 	 * XXX
    2639 	 * This code isn't correct because pins 2 and 3 are located
    2640 	 * at different positions on newer chips. Check all datasheets.
    2641 	 *
    2642 	 * Until this problem is resolved, only handle chips < 82580.
   2643 	 */
   2644 	if (sc->sc_type <= WM_T_82580) {
   2645 		if (sc->sc_type >= WM_T_82544) {
   2646 			sc->sc_ctrl |=
   2647 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2648 			    CTRL_SWDPIO_SHIFT;
   2649 			sc->sc_ctrl |=
   2650 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2651 			    CTRL_SWDPINS_SHIFT;
   2652 		} else {
   2653 			sc->sc_ctrl |=
   2654 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2655 			    CTRL_SWDPIO_SHIFT;
   2656 		}
   2657 	}
   2658 
   2659 	/* XXX For other than 82580? */
   2660 	if (sc->sc_type == WM_T_82580) {
   2661 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2662 		if (nvmword & __BIT(13))
   2663 			sc->sc_ctrl |= CTRL_ILOS;
   2664 	}
   2665 
   2666 #if 0
   2667 	if (sc->sc_type >= WM_T_82544) {
   2668 		if (cfg1 & NVM_CFG1_IPS0)
   2669 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2670 		if (cfg1 & NVM_CFG1_IPS1)
   2671 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2672 		sc->sc_ctrl_ext |=
   2673 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2674 		    CTRL_EXT_SWDPIO_SHIFT;
   2675 		sc->sc_ctrl_ext |=
   2676 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2677 		    CTRL_EXT_SWDPINS_SHIFT;
   2678 	} else {
   2679 		sc->sc_ctrl_ext |=
   2680 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2681 		    CTRL_EXT_SWDPIO_SHIFT;
   2682 	}
   2683 #endif
   2684 
   2685 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2686 #if 0
   2687 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2688 #endif
   2689 
   2690 	if (sc->sc_type == WM_T_PCH) {
   2691 		uint16_t val;
   2692 
   2693 		/* Save the NVM K1 bit setting */
   2694 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2695 
   2696 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2697 			sc->sc_nvm_k1_enabled = 1;
   2698 		else
   2699 			sc->sc_nvm_k1_enabled = 0;
   2700 	}
   2701 
   2702 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
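	/*
	 * (Overview of the decision below: ICH/PCH and 8257[34]/82583 parts
	 * are copper-only. 82575 and newer read the CTRL_EXT link mode
	 * field: 1000KX maps to SERDES, SGMII-over-MDIO to copper, and
	 * SGMII-over-I2C or PCIE_SERDES to whatever the SFP module reports.
	 * Older parts fall back to the STATUS_TBIMODE bit.)
	 */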
   2703 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2704 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2705 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2706 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2707 	    || sc->sc_type == WM_T_82573
   2708 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2709 		/* Copper only */
   2710 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2711 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2712 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2713 	    || (sc->sc_type == WM_T_I211)) {
   2714 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2715 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2716 		switch (link_mode) {
   2717 		case CTRL_EXT_LINK_MODE_1000KX:
   2718 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2719 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2720 			break;
   2721 		case CTRL_EXT_LINK_MODE_SGMII:
   2722 			if (wm_sgmii_uses_mdio(sc)) {
   2723 				aprint_verbose_dev(sc->sc_dev,
   2724 				    "SGMII(MDIO)\n");
   2725 				sc->sc_flags |= WM_F_SGMII;
   2726 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2727 				break;
   2728 			}
   2729 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2730 			/*FALLTHROUGH*/
   2731 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2732 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2733 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2734 				if (link_mode
   2735 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2736 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2737 					sc->sc_flags |= WM_F_SGMII;
   2738 				} else {
   2739 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2740 					aprint_verbose_dev(sc->sc_dev,
   2741 					    "SERDES\n");
   2742 				}
   2743 				break;
   2744 			}
   2745 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2746 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2747 
   2748 			/* Change current link mode setting */
   2749 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2750 			switch (sc->sc_mediatype) {
   2751 			case WM_MEDIATYPE_COPPER:
   2752 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2753 				break;
   2754 			case WM_MEDIATYPE_SERDES:
   2755 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2756 				break;
   2757 			default:
   2758 				break;
   2759 			}
   2760 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 			break;
   2762 		case CTRL_EXT_LINK_MODE_GMII:
   2763 		default:
   2764 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2765 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2766 			break;
   2767 		}
   2768 
    2769 		reg &= ~CTRL_EXT_I2C_ENA;
    2770 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2771 			reg |= CTRL_EXT_I2C_ENA;
   2774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2775 	} else if (sc->sc_type < WM_T_82543 ||
   2776 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2777 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2778 			aprint_error_dev(sc->sc_dev,
   2779 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2780 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2781 		}
   2782 	} else {
   2783 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2784 			aprint_error_dev(sc->sc_dev,
   2785 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2786 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2787 		}
   2788 	}
   2789 
   2790 	if (sc->sc_type >= WM_T_PCH2)
   2791 		sc->sc_flags |= WM_F_EEE;
   2792 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2793 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2794 		/* XXX: Need special handling for I354. (not yet) */
   2795 		if (sc->sc_type != WM_T_I354)
   2796 			sc->sc_flags |= WM_F_EEE;
   2797 	}
   2798 
   2799 	/* Set device properties (macflags) */
   2800 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2801 
   2802 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2803 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2804 
   2805 	/* Initialize the media structures accordingly. */
   2806 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2807 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2808 	else
   2809 		wm_tbi_mediainit(sc); /* All others */
   2810 
   2811 	ifp = &sc->sc_ethercom.ec_if;
   2812 	xname = device_xname(sc->sc_dev);
   2813 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2814 	ifp->if_softc = sc;
   2815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2816 #ifdef WM_MPSAFE
   2817 	ifp->if_extflags = IFEF_MPSAFE;
   2818 #endif
   2819 	ifp->if_ioctl = wm_ioctl;
   2820 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2821 		ifp->if_start = wm_nq_start;
    2822 		/*
    2823 		 * When there is only one CPU and the controller can use
    2824 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2825 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2826 		 * the other for link status changes.
    2827 		 * In this situation, wm_nq_transmit() is disadvantageous
    2828 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2829 		 */
   2830 		if (wm_is_using_multiqueue(sc))
   2831 			ifp->if_transmit = wm_nq_transmit;
   2832 	} else {
   2833 		ifp->if_start = wm_start;
    2834 		/*
    2835 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2836 		 */
   2837 		if (wm_is_using_multiqueue(sc))
   2838 			ifp->if_transmit = wm_transmit;
   2839 	}
    2840 	/* wm(4) does not use ifp->if_watchdog; wm_tick is the watchdog. */
   2841 	ifp->if_init = wm_init;
   2842 	ifp->if_stop = wm_stop;
   2843 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2844 	IFQ_SET_READY(&ifp->if_snd);
   2845 
   2846 	/* Check for jumbo frame */
   2847 	switch (sc->sc_type) {
   2848 	case WM_T_82573:
   2849 		/* XXX limited to 9234 if ASPM is disabled */
   2850 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2851 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2852 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2853 		break;
   2854 	case WM_T_82571:
   2855 	case WM_T_82572:
   2856 	case WM_T_82574:
   2857 	case WM_T_82583:
   2858 	case WM_T_82575:
   2859 	case WM_T_82576:
   2860 	case WM_T_82580:
   2861 	case WM_T_I350:
   2862 	case WM_T_I354:
   2863 	case WM_T_I210:
   2864 	case WM_T_I211:
   2865 	case WM_T_80003:
   2866 	case WM_T_ICH9:
   2867 	case WM_T_ICH10:
   2868 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2869 	case WM_T_PCH_LPT:
   2870 	case WM_T_PCH_SPT:
   2871 	case WM_T_PCH_CNP:
   2872 		/* XXX limited to 9234 */
   2873 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2874 		break;
   2875 	case WM_T_PCH:
   2876 		/* XXX limited to 4096 */
   2877 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2878 		break;
   2879 	case WM_T_82542_2_0:
   2880 	case WM_T_82542_2_1:
   2881 	case WM_T_ICH8:
   2882 		/* No support for jumbo frame */
   2883 		break;
   2884 	default:
   2885 		/* ETHER_MAX_LEN_JUMBO */
   2886 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2887 		break;
   2888 	}
   2889 
    2890 	/* If we're an i82543 or greater, we can support VLANs. */
   2891 	if (sc->sc_type >= WM_T_82543) {
   2892 		sc->sc_ethercom.ec_capabilities |=
   2893 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2894 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2895 	}
   2896 
   2897 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2898 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2899 
   2900 	/*
    2901 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2902 	 * on i82543 and later.
   2903 	 */
   2904 	if (sc->sc_type >= WM_T_82543) {
   2905 		ifp->if_capabilities |=
   2906 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2907 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2908 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2909 		    IFCAP_CSUM_TCPv6_Tx |
   2910 		    IFCAP_CSUM_UDPv6_Tx;
   2911 	}
   2912 
   2913 	/*
   2914 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2915 	 *
   2916 	 *	82541GI (8086:1076) ... no
   2917 	 *	82572EI (8086:10b9) ... yes
   2918 	 */
   2919 	if (sc->sc_type >= WM_T_82571) {
   2920 		ifp->if_capabilities |=
   2921 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2922 	}
   2923 
   2924 	/*
    2925 	 * If we're an i82544 or greater (except i82547), we can do
   2926 	 * TCP segmentation offload.
   2927 	 */
   2928 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2929 		ifp->if_capabilities |= IFCAP_TSOv4;
   2930 	}
   2931 
   2932 	if (sc->sc_type >= WM_T_82571) {
   2933 		ifp->if_capabilities |= IFCAP_TSOv6;
   2934 	}
   2935 
   2936 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2937 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2939 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2940 
   2941 #ifdef WM_MPSAFE
   2942 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2943 #else
   2944 	sc->sc_core_lock = NULL;
   2945 #endif
   2946 
   2947 	/* Attach the interface. */
   2948 	error = if_initialize(ifp);
   2949 	if (error != 0) {
   2950 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2951 		    error);
   2952 		return; /* Error */
   2953 	}
   2954 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2955 	ether_ifattach(ifp, enaddr);
   2956 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2957 	if_register(ifp);
   2958 
   2959 #ifdef WM_EVENT_COUNTERS
   2960 	/* Attach event counters. */
   2961 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2962 	    NULL, xname, "linkintr");
   2963 
   2964 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2965 	    NULL, xname, "tx_xoff");
   2966 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2967 	    NULL, xname, "tx_xon");
   2968 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2969 	    NULL, xname, "rx_xoff");
   2970 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2971 	    NULL, xname, "rx_xon");
   2972 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2973 	    NULL, xname, "rx_macctl");
   2974 #endif /* WM_EVENT_COUNTERS */
   2975 
   2976 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2977 		pmf_class_network_register(self, ifp);
   2978 	else
   2979 		aprint_error_dev(self, "couldn't establish power handler\n");
   2980 
   2981 	sc->sc_flags |= WM_F_ATTACHED;
   2982 out:
   2983 	return;
   2984 }
   2985 
   2986 /* The detach function (ca_detach) */
   2987 static int
   2988 wm_detach(device_t self, int flags __unused)
   2989 {
   2990 	struct wm_softc *sc = device_private(self);
   2991 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2992 	int i;
   2993 
   2994 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2995 		return 0;
   2996 
   2997 	/* Stop the interface. Callouts are stopped in it. */
   2998 	wm_stop(ifp, 1);
   2999 
   3000 	pmf_device_deregister(self);
   3001 
   3002 #ifdef WM_EVENT_COUNTERS
   3003 	evcnt_detach(&sc->sc_ev_linkintr);
   3004 
   3005 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3006 	evcnt_detach(&sc->sc_ev_tx_xon);
   3007 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3008 	evcnt_detach(&sc->sc_ev_rx_xon);
   3009 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3010 #endif /* WM_EVENT_COUNTERS */
   3011 
   3012 	/* Tell the firmware about the release */
   3013 	WM_CORE_LOCK(sc);
   3014 	wm_release_manageability(sc);
   3015 	wm_release_hw_control(sc);
   3016 	wm_enable_wakeup(sc);
   3017 	WM_CORE_UNLOCK(sc);
   3018 
   3019 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3020 
   3021 	/* Delete all remaining media. */
   3022 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3023 
   3024 	ether_ifdetach(ifp);
   3025 	if_detach(ifp);
   3026 	if_percpuq_destroy(sc->sc_ipq);
   3027 
   3028 	/* Unload RX dmamaps and free mbufs */
   3029 	for (i = 0; i < sc->sc_nqueues; i++) {
   3030 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3031 		mutex_enter(rxq->rxq_lock);
   3032 		wm_rxdrain(rxq);
   3033 		mutex_exit(rxq->rxq_lock);
   3034 	}
   3035 	/* Must unlock here */
   3036 
   3037 	/* Disestablish the interrupt handler */
   3038 	for (i = 0; i < sc->sc_nintrs; i++) {
   3039 		if (sc->sc_ihs[i] != NULL) {
   3040 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3041 			sc->sc_ihs[i] = NULL;
   3042 		}
   3043 	}
   3044 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3045 
   3046 	wm_free_txrx_queues(sc);
   3047 
   3048 	/* Unmap the registers */
   3049 	if (sc->sc_ss) {
   3050 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3051 		sc->sc_ss = 0;
   3052 	}
   3053 	if (sc->sc_ios) {
   3054 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3055 		sc->sc_ios = 0;
   3056 	}
   3057 	if (sc->sc_flashs) {
   3058 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3059 		sc->sc_flashs = 0;
   3060 	}
   3061 
   3062 	if (sc->sc_core_lock)
   3063 		mutex_obj_free(sc->sc_core_lock);
   3064 	if (sc->sc_ich_phymtx)
   3065 		mutex_obj_free(sc->sc_ich_phymtx);
   3066 	if (sc->sc_ich_nvmmtx)
   3067 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3068 
   3069 	return 0;
   3070 }
   3071 
   3072 static bool
   3073 wm_suspend(device_t self, const pmf_qual_t *qual)
   3074 {
   3075 	struct wm_softc *sc = device_private(self);
   3076 
   3077 	wm_release_manageability(sc);
   3078 	wm_release_hw_control(sc);
   3079 	wm_enable_wakeup(sc);
   3080 
   3081 	return true;
   3082 }
   3083 
   3084 static bool
   3085 wm_resume(device_t self, const pmf_qual_t *qual)
   3086 {
   3087 	struct wm_softc *sc = device_private(self);
   3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3089 	pcireg_t reg;
   3090 	char buf[256];
   3091 
   3092 	reg = CSR_READ(sc, WMREG_WUS);
   3093 	if (reg != 0) {
   3094 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3095 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3096 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3097 	}
   3098 
   3099 	if (sc->sc_type >= WM_T_PCH2)
   3100 		wm_resume_workarounds_pchlan(sc);
   3101 	if ((ifp->if_flags & IFF_UP) == 0) {
   3102 		wm_reset(sc);
   3103 		/* Non-AMT based hardware can now take control from firmware */
   3104 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3105 			wm_get_hw_control(sc);
   3106 		wm_init_manageability(sc);
   3107 	} else {
   3108 		/*
   3109 		 * We called pmf_class_network_register(), so if_init() is
   3110 		 * automatically called when IFF_UP. wm_reset(),
   3111 		 * wm_get_hw_control() and wm_init_manageability() are called
   3112 		 * via wm_init().
   3113 		 */
   3114 	}
   3115 
   3116 	return true;
   3117 }
   3118 
   3119 /*
   3120  * wm_watchdog:		[ifnet interface function]
   3121  *
   3122  *	Watchdog timer handler.
   3123  */
   3124 static void
   3125 wm_watchdog(struct ifnet *ifp)
   3126 {
   3127 	int qid;
   3128 	struct wm_softc *sc = ifp->if_softc;
    3129 	uint16_t hang_queue = 0; /* Max number of queues is 82576's 16 */
   3130 
   3131 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3132 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3133 
   3134 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3135 	}
   3136 
    3137 	/* If any of the queues hung up, reset the interface. */
   3138 	if (hang_queue != 0) {
   3139 		(void)wm_init(ifp);
   3140 
    3141 		/*
    3142 		 * Some upper-layer code still calls ifp->if_start()
    3143 		 * directly, e.g. ALTQ or a single-CPU system.
    3144 		 */
   3145 		/* Try to get more packets going. */
   3146 		ifp->if_start(ifp);
   3147 	}
   3148 }
   3149 
   3150 
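         /*
          * wm_watchdog_txq:
          *
          *	Per-queue watchdog check, called from wm_watchdog(). If the
          *	queue still has unfinished transmit work past the timeout,
          *	mark it in *hang.
          */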
   3151 static void
   3152 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3153 {
   3154 
   3155 	mutex_enter(txq->txq_lock);
   3156 	if (txq->txq_sending &&
   3157 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3158 		wm_watchdog_txq_locked(ifp, txq, hang);
   3159 
   3160 	mutex_exit(txq->txq_lock);
   3161 }
   3162 
   3163 static void
   3164 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3165     uint16_t *hang)
   3166 {
   3167 	struct wm_softc *sc = ifp->if_softc;
   3168 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3169 
   3170 	KASSERT(mutex_owned(txq->txq_lock));
   3171 
   3172 	/*
   3173 	 * Since we're using delayed interrupts, sweep up
   3174 	 * before we report an error.
   3175 	 */
   3176 	wm_txeof(txq, UINT_MAX);
   3177 
   3178 	if (txq->txq_sending)
   3179 		*hang |= __BIT(wmq->wmq_id);
   3180 
   3181 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3182 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3183 		    device_xname(sc->sc_dev));
   3184 	} else {
   3185 #ifdef WM_DEBUG
   3186 		int i, j;
   3187 		struct wm_txsoft *txs;
   3188 #endif
   3189 		log(LOG_ERR,
   3190 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3191 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3192 		    txq->txq_next);
   3193 		ifp->if_oerrors++;
   3194 #ifdef WM_DEBUG
   3195 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3196 		    i = WM_NEXTTXS(txq, i)) {
   3197 			txs = &txq->txq_soft[i];
   3198 			printf("txs %d tx %d -> %d\n",
   3199 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3200 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3201 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3202 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3203 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3204 					printf("\t %#08x%08x\n",
   3205 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3207 				} else {
   3208 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3209 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3210 					    txq->txq_descs[j].wtx_addr.wa_low);
   3211 					printf("\t %#04x%02x%02x%08x\n",
   3212 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3213 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3214 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3215 					    txq->txq_descs[j].wtx_cmdlen);
   3216 				}
   3217 				if (j == txs->txs_lastdesc)
   3218 					break;
   3219 			}
   3220 		}
   3221 #endif
   3222 	}
   3223 }
   3224 
   3225 /*
   3226  * wm_tick:
   3227  *
   3228  *	One second timer, used to check link status, sweep up
   3229  *	completed transmit jobs, etc.
   3230  */
   3231 static void
   3232 wm_tick(void *arg)
   3233 {
   3234 	struct wm_softc *sc = arg;
   3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3236 #ifndef WM_MPSAFE
   3237 	int s = splnet();
   3238 #endif
   3239 
   3240 	WM_CORE_LOCK(sc);
   3241 
   3242 	if (sc->sc_core_stopping) {
   3243 		WM_CORE_UNLOCK(sc);
   3244 #ifndef WM_MPSAFE
   3245 		splx(s);
   3246 #endif
   3247 		return;
   3248 	}
   3249 
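         	/*
         	 * The XON/XOFF and flow-control statistics registers exist
         	 * only on 82542 rev 2.1 and later chips.
         	 */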
   3250 	if (sc->sc_type >= WM_T_82542_2_1) {
   3251 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3252 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3253 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3254 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3256 	}
   3257 
   3258 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3259 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3260 	    + CSR_READ(sc, WMREG_CRCERRS)
   3261 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3262 	    + CSR_READ(sc, WMREG_SYMERRC)
   3263 	    + CSR_READ(sc, WMREG_RXERRC)
   3264 	    + CSR_READ(sc, WMREG_SEC)
   3265 	    + CSR_READ(sc, WMREG_CEXTERR)
   3266 	    + CSR_READ(sc, WMREG_RLEC);
    3267 	/*
    3268 	 * WMREG_RNBC is incremented when no buffers are available in host
    3269 	 * memory. It is not the number of dropped packets, because the
    3270 	 * ethernet controller can still receive packets in that case as
    3271 	 * long as there is space in the PHY's FIFO.
    3272 	 *
    3273 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
    3274 	 * if_iqdrops.
    3275 	 */
   3276 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3277 
   3278 	if (sc->sc_flags & WM_F_HAS_MII)
   3279 		mii_tick(&sc->sc_mii);
   3280 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3281 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3282 		wm_serdes_tick(sc);
   3283 	else
   3284 		wm_tbi_tick(sc);
   3285 
   3286 	WM_CORE_UNLOCK(sc);
   3287 
   3288 	wm_watchdog(ifp);
   3289 
   3290 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3291 }
   3292 
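         /*
          * wm_ifflags_cb:
          *
          *	Callback for if_flags and ec_capenable changes. Handles what
          *	it can in place and returns ENETRESET when a full reinit is
          *	required.
          */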
   3293 static int
   3294 wm_ifflags_cb(struct ethercom *ec)
   3295 {
   3296 	struct ifnet *ifp = &ec->ec_if;
   3297 	struct wm_softc *sc = ifp->if_softc;
   3298 	u_short iffchange;
   3299 	int ecchange;
   3300 	bool needreset = false;
   3301 	int rc = 0;
   3302 
   3303 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3304 		device_xname(sc->sc_dev), __func__));
   3305 
   3306 	WM_CORE_LOCK(sc);
   3307 
   3308 	/*
   3309 	 * Check for if_flags.
   3310 	 * Main usage is to prevent linkdown when opening bpf.
   3311 	 */
   3312 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3313 	sc->sc_if_flags = ifp->if_flags;
   3314 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3315 		needreset = true;
   3316 		goto ec;
   3317 	}
   3318 
   3319 	/* iff related updates */
   3320 	if ((iffchange & IFF_PROMISC) != 0)
   3321 		wm_set_filter(sc);
   3322 
   3323 	wm_set_vlan(sc);
   3324 
   3325 ec:
   3326 	/* Check for ec_capenable. */
   3327 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3328 	sc->sc_ec_capenable = ec->ec_capenable;
   3329 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3330 		needreset = true;
   3331 		goto out;
   3332 	}
   3333 
   3334 	/* ec related updates */
   3335 	wm_set_eee(sc);
   3336 
   3337 out:
   3338 	if (needreset)
   3339 		rc = ENETRESET;
   3340 	WM_CORE_UNLOCK(sc);
   3341 
   3342 	return rc;
   3343 }
   3344 
   3345 /*
   3346  * wm_ioctl:		[ifnet interface function]
   3347  *
   3348  *	Handle control requests from the operator.
   3349  */
   3350 static int
   3351 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3352 {
   3353 	struct wm_softc *sc = ifp->if_softc;
   3354 	struct ifreq *ifr = (struct ifreq *)data;
   3355 	struct ifaddr *ifa = (struct ifaddr *)data;
   3356 	struct sockaddr_dl *sdl;
   3357 	int s, error;
   3358 
   3359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3360 		device_xname(sc->sc_dev), __func__));
   3361 
   3362 #ifndef WM_MPSAFE
   3363 	s = splnet();
   3364 #endif
   3365 	switch (cmd) {
   3366 	case SIOCSIFMEDIA:
   3367 		WM_CORE_LOCK(sc);
   3368 		/* Flow control requires full-duplex mode. */
   3369 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3370 		    (ifr->ifr_media & IFM_FDX) == 0)
   3371 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3372 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3373 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3374 				/* We can do both TXPAUSE and RXPAUSE. */
   3375 				ifr->ifr_media |=
   3376 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3377 			}
   3378 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3379 		}
   3380 		WM_CORE_UNLOCK(sc);
   3381 #ifdef WM_MPSAFE
   3382 		s = splnet();
   3383 #endif
   3384 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3385 #ifdef WM_MPSAFE
   3386 		splx(s);
   3387 #endif
   3388 		break;
   3389 	case SIOCINITIFADDR:
   3390 		WM_CORE_LOCK(sc);
   3391 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3392 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3393 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3394 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3395 			/* Unicast address is the first multicast entry */
   3396 			wm_set_filter(sc);
   3397 			error = 0;
   3398 			WM_CORE_UNLOCK(sc);
   3399 			break;
   3400 		}
   3401 		WM_CORE_UNLOCK(sc);
   3402 		/*FALLTHROUGH*/
   3403 	default:
   3404 #ifdef WM_MPSAFE
   3405 		s = splnet();
   3406 #endif
   3407 		/* It may call wm_start, so unlock here */
   3408 		error = ether_ioctl(ifp, cmd, data);
   3409 #ifdef WM_MPSAFE
   3410 		splx(s);
   3411 #endif
   3412 		if (error != ENETRESET)
   3413 			break;
   3414 
   3415 		error = 0;
   3416 
   3417 		if (cmd == SIOCSIFCAP)
   3418 			error = (*ifp->if_init)(ifp);
   3419 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3420 			;
   3421 		else if (ifp->if_flags & IFF_RUNNING) {
   3422 			/*
   3423 			 * Multicast list has changed; set the hardware filter
   3424 			 * accordingly.
   3425 			 */
   3426 			WM_CORE_LOCK(sc);
   3427 			wm_set_filter(sc);
   3428 			WM_CORE_UNLOCK(sc);
   3429 		}
   3430 		break;
   3431 	}
   3432 
   3433 #ifndef WM_MPSAFE
   3434 	splx(s);
   3435 #endif
   3436 	return error;
   3437 }
   3438 
   3439 /* MAC address related */
   3440 
   3441 /*
    3442  * Get the offset of the MAC address and return it.
    3443  * If an error occurs, return offset 0.
   3444  */
   3445 static uint16_t
   3446 wm_check_alt_mac_addr(struct wm_softc *sc)
   3447 {
   3448 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3449 	uint16_t offset = NVM_OFF_MACADDR;
   3450 
   3451 	/* Try to read alternative MAC address pointer */
   3452 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3453 		return 0;
   3454 
    3455 	/* Check whether the pointer is valid. */
   3456 	if ((offset == 0x0000) || (offset == 0xffff))
   3457 		return 0;
   3458 
   3459 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3460 	/*
    3461 	 * Check whether the alternative MAC address is valid.
    3462 	 * Some cards have a non-0xffff pointer but don't actually use
    3463 	 * an alternative MAC address.
    3464 	 *
    3465 	 * A valid station address is unicast, so check that the
    3466 	 * multicast bit (the LSB of the first octet) is clear.
    3467 	 */
   3467 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3468 		if (((myea[0] & 0xff) & 0x01) == 0)
   3469 			return offset; /* Found */
   3470 
   3471 	/* Not found */
   3472 	return 0;
   3473 }
   3474 
   3475 static int
   3476 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3477 {
   3478 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3479 	uint16_t offset = NVM_OFF_MACADDR;
   3480 	int do_invert = 0;
   3481 
   3482 	switch (sc->sc_type) {
   3483 	case WM_T_82580:
   3484 	case WM_T_I350:
   3485 	case WM_T_I354:
   3486 		/* EEPROM Top Level Partitioning */
   3487 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3488 		break;
   3489 	case WM_T_82571:
   3490 	case WM_T_82575:
   3491 	case WM_T_82576:
   3492 	case WM_T_80003:
   3493 	case WM_T_I210:
   3494 	case WM_T_I211:
   3495 		offset = wm_check_alt_mac_addr(sc);
   3496 		if (offset == 0)
   3497 			if ((sc->sc_funcid & 0x01) == 1)
   3498 				do_invert = 1;
   3499 		break;
   3500 	default:
   3501 		if ((sc->sc_funcid & 0x01) == 1)
   3502 			do_invert = 1;
   3503 		break;
   3504 	}
   3505 
   3506 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3507 		goto bad;
   3508 
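         	/* Each 16-bit NVM word holds two address octets, low byte first. */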
   3509 	enaddr[0] = myea[0] & 0xff;
   3510 	enaddr[1] = myea[0] >> 8;
   3511 	enaddr[2] = myea[1] & 0xff;
   3512 	enaddr[3] = myea[1] >> 8;
   3513 	enaddr[4] = myea[2] & 0xff;
   3514 	enaddr[5] = myea[2] >> 8;
   3515 
   3516 	/*
   3517 	 * Toggle the LSB of the MAC address on the second port
   3518 	 * of some dual port cards.
   3519 	 */
   3520 	if (do_invert != 0)
   3521 		enaddr[5] ^= 1;
   3522 
   3523 	return 0;
   3524 
   3525  bad:
   3526 	return -1;
   3527 }
   3528 
   3529 /*
   3530  * wm_set_ral:
   3531  *
    3532  *	Set an entry in the receive address list.
   3533  */
   3534 static void
   3535 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3536 {
   3537 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3538 	uint32_t wlock_mac;
   3539 	int rv;
   3540 
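         	/*
         	 * RAL holds the low 32 bits of the address; RAH holds the
         	 * high 16 bits plus the Address Valid (AV) flag. A NULL
         	 * enaddr clears the slot.
         	 */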
   3541 	if (enaddr != NULL) {
   3542 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3543 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3544 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3545 		ral_hi |= RAL_AV;
   3546 	} else {
   3547 		ral_lo = 0;
   3548 		ral_hi = 0;
   3549 	}
   3550 
   3551 	switch (sc->sc_type) {
   3552 	case WM_T_82542_2_0:
   3553 	case WM_T_82542_2_1:
   3554 	case WM_T_82543:
   3555 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3556 		CSR_WRITE_FLUSH(sc);
   3557 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3558 		CSR_WRITE_FLUSH(sc);
   3559 		break;
   3560 	case WM_T_PCH2:
   3561 	case WM_T_PCH_LPT:
   3562 	case WM_T_PCH_SPT:
   3563 	case WM_T_PCH_CNP:
   3564 		if (idx == 0) {
   3565 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3566 			CSR_WRITE_FLUSH(sc);
   3567 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3568 			CSR_WRITE_FLUSH(sc);
   3569 			return;
   3570 		}
   3571 		if (sc->sc_type != WM_T_PCH2) {
   3572 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3573 			    FWSM_WLOCK_MAC);
   3574 			addrl = WMREG_SHRAL(idx - 1);
   3575 			addrh = WMREG_SHRAH(idx - 1);
   3576 		} else {
   3577 			wlock_mac = 0;
   3578 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3579 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3580 		}
   3581 
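         		/*
         		 * Only write the entry if firmware has not locked it;
         		 * WLOCK_MAC == 0 means all SHRA entries are writable.
         		 */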
   3582 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3583 			rv = wm_get_swflag_ich8lan(sc);
   3584 			if (rv != 0)
   3585 				return;
   3586 			CSR_WRITE(sc, addrl, ral_lo);
   3587 			CSR_WRITE_FLUSH(sc);
   3588 			CSR_WRITE(sc, addrh, ral_hi);
   3589 			CSR_WRITE_FLUSH(sc);
   3590 			wm_put_swflag_ich8lan(sc);
   3591 		}
   3592 
   3593 		break;
   3594 	default:
   3595 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3596 		CSR_WRITE_FLUSH(sc);
   3597 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3598 		CSR_WRITE_FLUSH(sc);
   3599 		break;
   3600 	}
   3601 }
   3602 
   3603 /*
   3604  * wm_mchash:
   3605  *
   3606  *	Compute the hash of the multicast address for the 4096-bit
   3607  *	multicast filter.
   3608  */
   3609 static uint32_t
   3610 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3611 {
   3612 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3613 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3614 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3615 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3616 	uint32_t hash;
   3617 
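         	/*
         	 * The hash is built from bits of the last two address octets;
         	 * sc_mchash_type selects one of four bit windows. The ICH/PCH
         	 * chips have a smaller multicast table, so they use a 10-bit
         	 * hash instead of a 12-bit one.
         	 */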
   3618 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3619 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3620 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3621 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3622 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3623 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3624 		return (hash & 0x3ff);
   3625 	}
   3626 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3627 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3628 
   3629 	return (hash & 0xfff);
   3630 }
   3631 
    3632 /*
    3633  * wm_rar_count:
    3634  *
    3635  *	Return the number of usable receive address registers for this chip.
    3636  */
   3636 static int
   3637 wm_rar_count(struct wm_softc *sc)
   3638 {
   3639 	int size;
   3640 
   3641 	switch (sc->sc_type) {
   3642 	case WM_T_ICH8:
    3643 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3644 		break;
   3645 	case WM_T_ICH9:
   3646 	case WM_T_ICH10:
   3647 	case WM_T_PCH:
   3648 		size = WM_RAL_TABSIZE_ICH8;
   3649 		break;
   3650 	case WM_T_PCH2:
   3651 		size = WM_RAL_TABSIZE_PCH2;
   3652 		break;
   3653 	case WM_T_PCH_LPT:
   3654 	case WM_T_PCH_SPT:
   3655 	case WM_T_PCH_CNP:
   3656 		size = WM_RAL_TABSIZE_PCH_LPT;
   3657 		break;
   3658 	case WM_T_82575:
   3659 	case WM_T_I210:
   3660 	case WM_T_I211:
   3661 		size = WM_RAL_TABSIZE_82575;
   3662 		break;
   3663 	case WM_T_82576:
   3664 	case WM_T_82580:
   3665 		size = WM_RAL_TABSIZE_82576;
   3666 		break;
   3667 	case WM_T_I350:
   3668 	case WM_T_I354:
   3669 		size = WM_RAL_TABSIZE_I350;
   3670 		break;
   3671 	default:
   3672 		size = WM_RAL_TABSIZE;
   3673 	}
   3674 
   3675 	return size;
   3676 }
   3677 
   3678 /*
   3679  * wm_set_filter:
   3680  *
   3681  *	Set up the receive filter.
   3682  */
   3683 static void
   3684 wm_set_filter(struct wm_softc *sc)
   3685 {
   3686 	struct ethercom *ec = &sc->sc_ethercom;
   3687 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3688 	struct ether_multi *enm;
   3689 	struct ether_multistep step;
   3690 	bus_addr_t mta_reg;
   3691 	uint32_t hash, reg, bit;
   3692 	int i, size, ralmax;
   3693 
   3694 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3695 		device_xname(sc->sc_dev), __func__));
   3696 
   3697 	if (sc->sc_type >= WM_T_82544)
   3698 		mta_reg = WMREG_CORDOVA_MTA;
   3699 	else
   3700 		mta_reg = WMREG_MTA;
   3701 
   3702 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3703 
   3704 	if (ifp->if_flags & IFF_BROADCAST)
   3705 		sc->sc_rctl |= RCTL_BAM;
   3706 	if (ifp->if_flags & IFF_PROMISC) {
   3707 		sc->sc_rctl |= RCTL_UPE;
   3708 		ETHER_LOCK(ec);
   3709 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3710 		ETHER_UNLOCK(ec);
   3711 		goto allmulti;
   3712 	}
   3713 
   3714 	/*
   3715 	 * Set the station address in the first RAL slot, and
   3716 	 * clear the remaining slots.
   3717 	 */
   3718 	size = wm_rar_count(sc);
   3719 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3720 
   3721 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3722 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3723 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3724 		switch (i) {
   3725 		case 0:
   3726 			/* We can use all entries */
   3727 			ralmax = size;
   3728 			break;
   3729 		case 1:
   3730 			/* Only RAR[0] */
   3731 			ralmax = 1;
   3732 			break;
   3733 		default:
   3734 			/* Available SHRA + RAR[0] */
   3735 			ralmax = i + 1;
   3736 		}
   3737 	} else
   3738 		ralmax = size;
   3739 	for (i = 1; i < size; i++) {
   3740 		if (i < ralmax)
   3741 			wm_set_ral(sc, NULL, i);
   3742 	}
   3743 
   3744 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3745 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3746 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3747 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3748 		size = WM_ICH8_MC_TABSIZE;
   3749 	else
   3750 		size = WM_MC_TABSIZE;
   3751 	/* Clear out the multicast table. */
   3752 	for (i = 0; i < size; i++) {
   3753 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3754 		CSR_WRITE_FLUSH(sc);
   3755 	}
   3756 
   3757 	ETHER_LOCK(ec);
   3758 	ETHER_FIRST_MULTI(step, ec, enm);
   3759 	while (enm != NULL) {
   3760 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3761 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3762 			ETHER_UNLOCK(ec);
   3763 			/*
   3764 			 * We must listen to a range of multicast addresses.
   3765 			 * For now, just accept all multicasts, rather than
   3766 			 * trying to set only those filter bits needed to match
   3767 			 * the range.  (At this time, the only use of address
   3768 			 * ranges is for IP multicast routing, for which the
   3769 			 * range is big enough to require all bits set.)
   3770 			 */
   3771 			goto allmulti;
   3772 		}
   3773 
   3774 		hash = wm_mchash(sc, enm->enm_addrlo);
   3775 
   3776 		reg = (hash >> 5);
   3777 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3778 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3779 		    || (sc->sc_type == WM_T_PCH2)
   3780 		    || (sc->sc_type == WM_T_PCH_LPT)
   3781 		    || (sc->sc_type == WM_T_PCH_SPT)
   3782 		    || (sc->sc_type == WM_T_PCH_CNP))
   3783 			reg &= 0x1f;
   3784 		else
   3785 			reg &= 0x7f;
   3786 		bit = hash & 0x1f;
   3787 
   3788 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3789 		hash |= 1U << bit;
   3790 
   3791 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3792 			/*
    3793 			 * 82544 Errata 9: Certain registers cannot be written
   3794 			 * with particular alignments in PCI-X bus operation
   3795 			 * (FCAH, MTA and VFTA).
   3796 			 */
   3797 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3798 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3799 			CSR_WRITE_FLUSH(sc);
   3800 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3801 			CSR_WRITE_FLUSH(sc);
   3802 		} else {
   3803 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3804 			CSR_WRITE_FLUSH(sc);
   3805 		}
   3806 
   3807 		ETHER_NEXT_MULTI(step, enm);
   3808 	}
   3809 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3810 	ETHER_UNLOCK(ec);
   3811 
   3812 	goto setit;
   3813 
   3814  allmulti:
   3815 	sc->sc_rctl |= RCTL_MPE;
   3816 
   3817  setit:
   3818 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3819 }
   3820 
   3821 /* Reset and init related */
   3822 
   3823 static void
   3824 wm_set_vlan(struct wm_softc *sc)
   3825 {
   3826 
   3827 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3828 		device_xname(sc->sc_dev), __func__));
   3829 
   3830 	/* Deal with VLAN enables. */
   3831 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3832 		sc->sc_ctrl |= CTRL_VME;
   3833 	else
   3834 		sc->sc_ctrl &= ~CTRL_VME;
   3835 
   3836 	/* Write the control registers. */
   3837 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3838 }
   3839 
   3840 static void
   3841 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3842 {
   3843 	uint32_t gcr;
   3844 	pcireg_t ctrl2;
   3845 
   3846 	gcr = CSR_READ(sc, WMREG_GCR);
   3847 
    3848 	/* Only take action if the timeout value is at its default of 0 */
   3849 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3850 		goto out;
   3851 
   3852 	if ((gcr & GCR_CAP_VER2) == 0) {
   3853 		gcr |= GCR_CMPL_TMOUT_10MS;
   3854 		goto out;
   3855 	}
   3856 
   3857 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3858 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3859 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3860 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3861 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3862 
   3863 out:
   3864 	/* Disable completion timeout resend */
   3865 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3866 
   3867 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3868 }
   3869 
   3870 void
   3871 wm_get_auto_rd_done(struct wm_softc *sc)
   3872 {
   3873 	int i;
   3874 
    3875 	/* Wait for eeprom to reload */
   3876 	switch (sc->sc_type) {
   3877 	case WM_T_82571:
   3878 	case WM_T_82572:
   3879 	case WM_T_82573:
   3880 	case WM_T_82574:
   3881 	case WM_T_82583:
   3882 	case WM_T_82575:
   3883 	case WM_T_82576:
   3884 	case WM_T_82580:
   3885 	case WM_T_I350:
   3886 	case WM_T_I354:
   3887 	case WM_T_I210:
   3888 	case WM_T_I211:
   3889 	case WM_T_80003:
   3890 	case WM_T_ICH8:
   3891 	case WM_T_ICH9:
   3892 		for (i = 0; i < 10; i++) {
   3893 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3894 				break;
   3895 			delay(1000);
   3896 		}
   3897 		if (i == 10) {
   3898 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3899 			    "complete\n", device_xname(sc->sc_dev));
   3900 		}
   3901 		break;
   3902 	default:
   3903 		break;
   3904 	}
   3905 }
   3906 
   3907 void
   3908 wm_lan_init_done(struct wm_softc *sc)
   3909 {
   3910 	uint32_t reg = 0;
   3911 	int i;
   3912 
   3913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3914 		device_xname(sc->sc_dev), __func__));
   3915 
   3916 	/* Wait for eeprom to reload */
   3917 	switch (sc->sc_type) {
   3918 	case WM_T_ICH10:
   3919 	case WM_T_PCH:
   3920 	case WM_T_PCH2:
   3921 	case WM_T_PCH_LPT:
   3922 	case WM_T_PCH_SPT:
   3923 	case WM_T_PCH_CNP:
   3924 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3925 			reg = CSR_READ(sc, WMREG_STATUS);
   3926 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3927 				break;
   3928 			delay(100);
   3929 		}
   3930 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3931 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3932 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3933 		}
   3934 		break;
   3935 	default:
   3936 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3937 		    __func__);
   3938 		break;
   3939 	}
   3940 
   3941 	reg &= ~STATUS_LAN_INIT_DONE;
   3942 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3943 }
   3944 
   3945 void
   3946 wm_get_cfg_done(struct wm_softc *sc)
   3947 {
   3948 	int mask;
   3949 	uint32_t reg;
   3950 	int i;
   3951 
   3952 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3953 		device_xname(sc->sc_dev), __func__));
   3954 
   3955 	/* Wait for eeprom to reload */
   3956 	switch (sc->sc_type) {
   3957 	case WM_T_82542_2_0:
   3958 	case WM_T_82542_2_1:
   3959 		/* null */
   3960 		break;
   3961 	case WM_T_82543:
   3962 	case WM_T_82544:
   3963 	case WM_T_82540:
   3964 	case WM_T_82545:
   3965 	case WM_T_82545_3:
   3966 	case WM_T_82546:
   3967 	case WM_T_82546_3:
   3968 	case WM_T_82541:
   3969 	case WM_T_82541_2:
   3970 	case WM_T_82547:
   3971 	case WM_T_82547_2:
   3972 	case WM_T_82573:
   3973 	case WM_T_82574:
   3974 	case WM_T_82583:
   3975 		/* generic */
   3976 		delay(10*1000);
   3977 		break;
   3978 	case WM_T_80003:
   3979 	case WM_T_82571:
   3980 	case WM_T_82572:
   3981 	case WM_T_82575:
   3982 	case WM_T_82576:
   3983 	case WM_T_82580:
   3984 	case WM_T_I350:
   3985 	case WM_T_I354:
   3986 	case WM_T_I210:
   3987 	case WM_T_I211:
   3988 		if (sc->sc_type == WM_T_82571) {
    3989 			/* On 82571, all functions share the port 0 bit */
   3990 			mask = EEMNGCTL_CFGDONE_0;
   3991 		} else
   3992 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3993 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3994 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3995 				break;
   3996 			delay(1000);
   3997 		}
   3998 		if (i >= WM_PHY_CFG_TIMEOUT)
   3999 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4000 				device_xname(sc->sc_dev), __func__));
   4001 		break;
   4002 	case WM_T_ICH8:
   4003 	case WM_T_ICH9:
   4004 	case WM_T_ICH10:
   4005 	case WM_T_PCH:
   4006 	case WM_T_PCH2:
   4007 	case WM_T_PCH_LPT:
   4008 	case WM_T_PCH_SPT:
   4009 	case WM_T_PCH_CNP:
   4010 		delay(10*1000);
   4011 		if (sc->sc_type >= WM_T_ICH10)
   4012 			wm_lan_init_done(sc);
   4013 		else
   4014 			wm_get_auto_rd_done(sc);
   4015 
   4016 		/* Clear PHY Reset Asserted bit */
   4017 		reg = CSR_READ(sc, WMREG_STATUS);
   4018 		if ((reg & STATUS_PHYRA) != 0)
   4019 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4020 		break;
   4021 	default:
   4022 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4023 		    __func__);
   4024 		break;
   4025 	}
   4026 }
   4027 
   4028 int
   4029 wm_phy_post_reset(struct wm_softc *sc)
   4030 {
   4031 	device_t dev = sc->sc_dev;
   4032 	uint16_t reg;
   4033 	int rv = 0;
   4034 
   4035 	/* This function is only for ICH8 and newer. */
   4036 	if (sc->sc_type < WM_T_ICH8)
   4037 		return 0;
   4038 
   4039 	if (wm_phy_resetisblocked(sc)) {
   4040 		/* XXX */
   4041 		device_printf(dev, "PHY is blocked\n");
   4042 		return -1;
   4043 	}
   4044 
   4045 	/* Allow time for h/w to get to quiescent state after reset */
   4046 	delay(10*1000);
   4047 
   4048 	/* Perform any necessary post-reset workarounds */
   4049 	if (sc->sc_type == WM_T_PCH)
   4050 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4051 	else if (sc->sc_type == WM_T_PCH2)
   4052 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4053 	if (rv != 0)
   4054 		return rv;
   4055 
   4056 	/* Clear the host wakeup bit after lcd reset */
   4057 	if (sc->sc_type >= WM_T_PCH) {
   4058 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4059 		reg &= ~BM_WUC_HOST_WU_BIT;
   4060 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4061 	}
   4062 
   4063 	/* Configure the LCD with the extended configuration region in NVM */
   4064 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4065 		return rv;
   4066 
   4067 	/* Configure the LCD with the OEM bits in NVM */
   4068 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4069 
   4070 	if (sc->sc_type == WM_T_PCH2) {
   4071 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4072 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4073 			delay(10 * 1000);
   4074 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4075 		}
   4076 		/* Set EEE LPI Update Timer to 200usec */
   4077 		rv = sc->phy.acquire(sc);
   4078 		if (rv)
   4079 			return rv;
   4080 		rv = wm_write_emi_reg_locked(dev,
   4081 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4082 		sc->phy.release(sc);
   4083 	}
   4084 
   4085 	return rv;
   4086 }
   4087 
   4088 /* Only for PCH and newer */
   4089 static int
   4090 wm_write_smbus_addr(struct wm_softc *sc)
   4091 {
   4092 	uint32_t strap, freq;
   4093 	uint16_t phy_data;
   4094 	int rv;
   4095 
   4096 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4097 		device_xname(sc->sc_dev), __func__));
   4098 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4099 
   4100 	strap = CSR_READ(sc, WMREG_STRAP);
   4101 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4102 
   4103 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4104 	if (rv != 0)
   4105 		return -1;
   4106 
   4107 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4108 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4109 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4110 
   4111 	if (sc->sc_phytype == WMPHY_I217) {
   4112 		/* Restore SMBus frequency */
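         		/*
         		 * A strap value of 0 means the frequency is unsupported;
         		 * otherwise (freq - 1) encodes the low/high frequency
         		 * bits.
         		 */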
    4113 		if (freq--) {
   4114 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4115 			    | HV_SMB_ADDR_FREQ_HIGH);
   4116 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4117 			    HV_SMB_ADDR_FREQ_LOW);
   4118 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4119 			    HV_SMB_ADDR_FREQ_HIGH);
   4120 		} else
   4121 			DPRINTF(WM_DEBUG_INIT,
   4122 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4123 				device_xname(sc->sc_dev), __func__));
   4124 	}
   4125 
   4126 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4127 	    phy_data);
   4128 }
   4129 
   4130 static int
   4131 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4132 {
   4133 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4134 	uint16_t phy_page = 0;
   4135 	int rv = 0;
   4136 
   4137 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4138 		device_xname(sc->sc_dev), __func__));
   4139 
   4140 	switch (sc->sc_type) {
   4141 	case WM_T_ICH8:
   4142 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4143 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4144 			return 0;
   4145 
   4146 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4147 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4148 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4149 			break;
   4150 		}
   4151 		/* FALLTHROUGH */
   4152 	case WM_T_PCH:
   4153 	case WM_T_PCH2:
   4154 	case WM_T_PCH_LPT:
   4155 	case WM_T_PCH_SPT:
   4156 	case WM_T_PCH_CNP:
   4157 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4158 		break;
   4159 	default:
   4160 		return 0;
   4161 	}
   4162 
   4163 	if ((rv = sc->phy.acquire(sc)) != 0)
   4164 		return rv;
   4165 
   4166 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4167 	if ((reg & sw_cfg_mask) == 0)
   4168 		goto release;
   4169 
   4170 	/*
   4171 	 * Make sure HW does not configure LCD from PHY extended configuration
   4172 	 * before SW configuration
   4173 	 */
   4174 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4175 	if ((sc->sc_type < WM_T_PCH2)
   4176 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4177 		goto release;
   4178 
   4179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4180 		device_xname(sc->sc_dev), __func__));
   4181 	/* word_addr is in DWORD */
   4182 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4183 
   4184 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4185 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4186 	if (cnf_size == 0)
   4187 		goto release;
   4188 
   4189 	if (((sc->sc_type == WM_T_PCH)
   4190 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4191 	    || (sc->sc_type > WM_T_PCH)) {
   4192 		/*
   4193 		 * HW configures the SMBus address and LEDs when the OEM and
   4194 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4195 		 * are cleared, SW will configure them instead.
   4196 		 */
   4197 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4198 			device_xname(sc->sc_dev), __func__));
   4199 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4200 			goto release;
   4201 
   4202 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4203 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4204 		    (uint16_t)reg);
   4205 		if (rv != 0)
   4206 			goto release;
   4207 	}
   4208 
   4209 	/* Configure LCD from extended configuration region. */
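         	/*
         	 * Each entry in the region is a pair of NVM words: the
         	 * register data followed by the register address to write.
         	 */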
   4210 	for (i = 0; i < cnf_size; i++) {
   4211 		uint16_t reg_data, reg_addr;
   4212 
   4213 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4214 			goto release;
   4215 
   4216 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4217 			goto release;
   4218 
   4219 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4220 			phy_page = reg_data;
   4221 
   4222 		reg_addr &= IGPHY_MAXREGADDR;
   4223 		reg_addr |= phy_page;
   4224 
   4225 		KASSERT(sc->phy.writereg_locked != NULL);
   4226 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4227 		    reg_data);
   4228 	}
   4229 
   4230 release:
   4231 	sc->phy.release(sc);
   4232 	return rv;
   4233 }
   4234 
   4235 /*
   4236  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4237  *  @sc:       pointer to the HW structure
    4238  *  @d0_state: boolean, true when entering D0 and false when entering D3
   4239  *
   4240  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4241  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4242  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4243  */
   4244 int
   4245 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4246 {
   4247 	uint32_t mac_reg;
   4248 	uint16_t oem_reg;
   4249 	int rv;
   4250 
   4251 	if (sc->sc_type < WM_T_PCH)
   4252 		return 0;
   4253 
   4254 	rv = sc->phy.acquire(sc);
   4255 	if (rv != 0)
   4256 		return rv;
   4257 
   4258 	if (sc->sc_type == WM_T_PCH) {
   4259 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4260 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4261 			goto release;
   4262 	}
   4263 
   4264 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4265 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4266 		goto release;
   4267 
   4268 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4269 
   4270 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4271 	if (rv != 0)
   4272 		goto release;
   4273 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4274 
   4275 	if (d0_state) {
   4276 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4277 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4278 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4279 			oem_reg |= HV_OEM_BITS_LPLU;
   4280 	} else {
   4281 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4282 		    != 0)
   4283 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4284 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4285 		    != 0)
   4286 			oem_reg |= HV_OEM_BITS_LPLU;
   4287 	}
   4288 
   4289 	/* Set Restart auto-neg to activate the bits */
   4290 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4291 	    && (wm_phy_resetisblocked(sc) == false))
   4292 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4293 
   4294 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4295 
   4296 release:
   4297 	sc->phy.release(sc);
   4298 
   4299 	return rv;
   4300 }
   4301 
   4302 /* Init hardware bits */
   4303 void
   4304 wm_initialize_hardware_bits(struct wm_softc *sc)
   4305 {
   4306 	uint32_t tarc0, tarc1, reg;
   4307 
   4308 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4309 		device_xname(sc->sc_dev), __func__));
   4310 
   4311 	/* For 82571 variant, 80003 and ICHs */
   4312 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4313 	    || (sc->sc_type >= WM_T_80003)) {
   4314 
   4315 		/* Transmit Descriptor Control 0 */
   4316 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4317 		reg |= TXDCTL_COUNT_DESC;
   4318 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4319 
   4320 		/* Transmit Descriptor Control 1 */
   4321 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4322 		reg |= TXDCTL_COUNT_DESC;
   4323 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4324 
   4325 		/* TARC0 */
   4326 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4327 		switch (sc->sc_type) {
   4328 		case WM_T_82571:
   4329 		case WM_T_82572:
   4330 		case WM_T_82573:
   4331 		case WM_T_82574:
   4332 		case WM_T_82583:
   4333 		case WM_T_80003:
   4334 			/* Clear bits 30..27 */
   4335 			tarc0 &= ~__BITS(30, 27);
   4336 			break;
   4337 		default:
   4338 			break;
   4339 		}
   4340 
   4341 		switch (sc->sc_type) {
   4342 		case WM_T_82571:
   4343 		case WM_T_82572:
   4344 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4345 
   4346 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4347 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4348 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4349 			/* 8257[12] Errata No.7 */
    4350 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4351 
   4352 			/* TARC1 bit 28 */
   4353 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4354 				tarc1 &= ~__BIT(28);
   4355 			else
   4356 				tarc1 |= __BIT(28);
   4357 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4358 
   4359 			/*
   4360 			 * 8257[12] Errata No.13
    4361 			 * Disable Dynamic Clock Gating.
   4362 			 */
   4363 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4364 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4365 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4366 			break;
   4367 		case WM_T_82573:
   4368 		case WM_T_82574:
   4369 		case WM_T_82583:
   4370 			if ((sc->sc_type == WM_T_82574)
   4371 			    || (sc->sc_type == WM_T_82583))
   4372 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4373 
   4374 			/* Extended Device Control */
   4375 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4376 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4377 			reg |= __BIT(22);	/* Set bit 22 */
   4378 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4379 
   4380 			/* Device Control */
   4381 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4382 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4383 
   4384 			/* PCIe Control Register */
   4385 			/*
   4386 			 * 82573 Errata (unknown).
   4387 			 *
   4388 			 * 82574 Errata 25 and 82583 Errata 12
   4389 			 * "Dropped Rx Packets":
    4390 			 *   This bug is fixed in NVM image 2.1.4 and newer.
   4391 			 */
   4392 			reg = CSR_READ(sc, WMREG_GCR);
   4393 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4394 			CSR_WRITE(sc, WMREG_GCR, reg);
   4395 
   4396 			if ((sc->sc_type == WM_T_82574)
   4397 			    || (sc->sc_type == WM_T_82583)) {
   4398 				/*
   4399 				 * Document says this bit must be set for
   4400 				 * proper operation.
   4401 				 */
   4402 				reg = CSR_READ(sc, WMREG_GCR);
   4403 				reg |= __BIT(22);
   4404 				CSR_WRITE(sc, WMREG_GCR, reg);
   4405 
   4406 				/*
    4407 				 * Apply a workaround for the hardware erratum
    4408 				 * documented in the errata docs. It fixes an
    4409 				 * issue where error-prone or unreliable PCIe
    4410 				 * completions occur, particularly with ASPM
    4411 				 * enabled. Without the fix, the issue can
    4412 				 * cause Tx timeouts.
   4413 				 */
   4414 				reg = CSR_READ(sc, WMREG_GCR2);
   4415 				reg |= __BIT(0);
   4416 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4417 			}
   4418 			break;
   4419 		case WM_T_80003:
   4420 			/* TARC0 */
   4421 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4422 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4423 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4424 
   4425 			/* TARC1 bit 28 */
   4426 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4427 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4428 				tarc1 &= ~__BIT(28);
   4429 			else
   4430 				tarc1 |= __BIT(28);
   4431 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4432 			break;
   4433 		case WM_T_ICH8:
   4434 		case WM_T_ICH9:
   4435 		case WM_T_ICH10:
   4436 		case WM_T_PCH:
   4437 		case WM_T_PCH2:
   4438 		case WM_T_PCH_LPT:
   4439 		case WM_T_PCH_SPT:
   4440 		case WM_T_PCH_CNP:
   4441 			/* TARC0 */
   4442 			if (sc->sc_type == WM_T_ICH8) {
   4443 				/* Set TARC0 bits 29 and 28 */
   4444 				tarc0 |= __BITS(29, 28);
   4445 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4446 				tarc0 |= __BIT(29);
   4447 				/*
   4448 				 *  Drop bit 28. From Linux.
   4449 				 * See I218/I219 spec update
   4450 				 * "5. Buffer Overrun While the I219 is
   4451 				 * Processing DMA Transactions"
   4452 				 */
   4453 				tarc0 &= ~__BIT(28);
   4454 			}
   4455 			/* Set TARC0 bits 23,24,26,27 */
   4456 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4457 
   4458 			/* CTRL_EXT */
   4459 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4460 			reg |= __BIT(22);	/* Set bit 22 */
   4461 			/*
   4462 			 * Enable PHY low-power state when MAC is at D3
   4463 			 * w/o WoL
   4464 			 */
   4465 			if (sc->sc_type >= WM_T_PCH)
   4466 				reg |= CTRL_EXT_PHYPDEN;
   4467 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4468 
   4469 			/* TARC1 */
   4470 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4471 			/* bit 28 */
   4472 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4473 				tarc1 &= ~__BIT(28);
   4474 			else
   4475 				tarc1 |= __BIT(28);
   4476 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4477 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4478 
   4479 			/* Device Status */
   4480 			if (sc->sc_type == WM_T_ICH8) {
   4481 				reg = CSR_READ(sc, WMREG_STATUS);
   4482 				reg &= ~__BIT(31);
   4483 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4484 
   4485 			}
   4486 
   4487 			/* IOSFPC */
   4488 			if (sc->sc_type == WM_T_PCH_SPT) {
   4489 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4490 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4491 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4492 			}
   4493 			/*
    4494 			 * To work around a descriptor data corruption issue
    4495 			 * during NFS v2 UDP traffic, just disable the NFS
    4496 			 * filtering capability.
   4497 			 */
   4498 			reg = CSR_READ(sc, WMREG_RFCTL);
   4499 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4501 			break;
   4502 		default:
   4503 			break;
   4504 		}
   4505 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4506 
   4507 		switch (sc->sc_type) {
   4508 		/*
   4509 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4510 		 * Avoid RSS Hash Value bug.
   4511 		 */
   4512 		case WM_T_82571:
   4513 		case WM_T_82572:
   4514 		case WM_T_82573:
   4515 		case WM_T_80003:
   4516 		case WM_T_ICH8:
   4517 			reg = CSR_READ(sc, WMREG_RFCTL);
   4518 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4519 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4520 			break;
   4521 		case WM_T_82574:
    4522 			/* Use extended Rx descriptors. */
   4523 			reg = CSR_READ(sc, WMREG_RFCTL);
   4524 			reg |= WMREG_RFCTL_EXSTEN;
   4525 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4526 			break;
   4527 		default:
   4528 			break;
   4529 		}
   4530 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4531 		/*
   4532 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4533 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4534 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4535 		 * Correctly by the Device"
   4536 		 *
   4537 		 * I354(C2000) Errata AVR53:
   4538 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4539 		 * Hang"
   4540 		 */
   4541 		reg = CSR_READ(sc, WMREG_RFCTL);
   4542 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4543 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4544 	}
   4545 }
   4546 
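         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Adjust an RXPBS value via the wm_82580_rxpbs_table lookup
          *	table; out-of-range values yield 0.
          */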
   4547 static uint32_t
   4548 wm_rxpbs_adjust_82580(uint32_t val)
   4549 {
   4550 	uint32_t rv = 0;
   4551 
   4552 	if (val < __arraycount(wm_82580_rxpbs_table))
   4553 		rv = wm_82580_rxpbs_table[val];
   4554 
   4555 	return rv;
   4556 }
   4557 
   4558 /*
   4559  * wm_reset_phy:
   4560  *
    4561  *	Generic PHY reset function.
    4562  *	Same as e1000_phy_hw_reset_generic().
   4563  */
   4564 static int
   4565 wm_reset_phy(struct wm_softc *sc)
   4566 {
   4567 	uint32_t reg;
   4568 
   4569 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4570 		device_xname(sc->sc_dev), __func__));
   4571 	if (wm_phy_resetisblocked(sc))
   4572 		return -1;
   4573 
   4574 	sc->phy.acquire(sc);
   4575 
   4576 	reg = CSR_READ(sc, WMREG_CTRL);
   4577 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4578 	CSR_WRITE_FLUSH(sc);
   4579 
   4580 	delay(sc->phy.reset_delay_us);
   4581 
   4582 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4583 	CSR_WRITE_FLUSH(sc);
   4584 
   4585 	delay(150);
   4586 
   4587 	sc->phy.release(sc);
   4588 
   4589 	wm_get_cfg_done(sc);
   4590 	wm_phy_post_reset(sc);
   4591 
   4592 	return 0;
   4593 }
   4594 
   4595 /*
    4596  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4597  * so it is enough to check sc->sc_queue[0] only.
   4598  */
   4599 static void
   4600 wm_flush_desc_rings(struct wm_softc *sc)
   4601 {
   4602 	pcireg_t preg;
   4603 	uint32_t reg;
   4604 	struct wm_txqueue *txq;
   4605 	wiseman_txdesc_t *txd;
   4606 	int nexttx;
   4607 	uint32_t rctl;
   4608 
   4609 	/* First, disable MULR fix in FEXTNVM11 */
   4610 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4611 	reg |= FEXTNVM11_DIS_MULRFIX;
   4612 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4613 
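         	/*
         	 * Flush only if the hardware has requested it and the Tx ring
         	 * is actually in use (TDLEN(0) != 0).
         	 */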
   4614 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4615 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4616 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4617 		return;
   4618 
   4619 	/* TX */
   4620 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4621 	    preg, reg);
   4622 	reg = CSR_READ(sc, WMREG_TCTL);
   4623 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4624 
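         	/*
         	 * Post a single dummy descriptor (512 bytes, with IFCS set)
         	 * and advance TDT so the hardware can complete the pending
         	 * flush request.
         	 */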
   4625 	txq = &sc->sc_queue[0].wmq_txq;
   4626 	nexttx = txq->txq_next;
   4627 	txd = &txq->txq_descs[nexttx];
   4628 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4629 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4630 	txd->wtx_fields.wtxu_status = 0;
   4631 	txd->wtx_fields.wtxu_options = 0;
   4632 	txd->wtx_fields.wtxu_vlan = 0;
   4633 
   4634 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4635 	    BUS_SPACE_BARRIER_WRITE);
   4636 
   4637 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4638 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4639 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4640 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4641 	delay(250);
   4642 
   4643 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4644 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4645 		return;
   4646 
   4647 	/* RX */
   4648 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4649 	rctl = CSR_READ(sc, WMREG_RCTL);
   4650 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4651 	CSR_WRITE_FLUSH(sc);
   4652 	delay(150);
   4653 
   4654 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4655 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4656 	reg &= 0xffffc000;
   4657 	/*
   4658 	 * Update thresholds: prefetch threshold to 31, host threshold
   4659 	 * to 1 and make sure the granularity is "descriptors" and not
   4660 	 * "cache lines"
   4661 	 */
   4662 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4663 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4664 
   4665 	/* Momentarily enable the RX ring for the changes to take effect */
   4666 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4667 	CSR_WRITE_FLUSH(sc);
   4668 	delay(150);
   4669 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4670 }
   4671 
   4672 /*
   4673  * wm_reset:
   4674  *
    4675  *	Reset the chip to a known state.
   4676  */
   4677 static void
   4678 wm_reset(struct wm_softc *sc)
   4679 {
   4680 	int phy_reset = 0;
   4681 	int i, error = 0;
   4682 	uint32_t reg;
   4683 	uint16_t kmreg;
   4684 	int rv;
   4685 
   4686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4687 		device_xname(sc->sc_dev), __func__));
   4688 	KASSERT(sc->sc_type != 0);
   4689 
   4690 	/*
   4691 	 * Allocate on-chip memory according to the MTU size.
   4692 	 * The Packet Buffer Allocation register must be written
   4693 	 * before the chip is reset.
   4694 	 */
   4695 	switch (sc->sc_type) {
   4696 	case WM_T_82547:
   4697 	case WM_T_82547_2:
   4698 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4699 		    PBA_22K : PBA_30K;
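         		/*
         		 * On these chips the Tx FIFO is whatever remains of
         		 * the 40K packet buffer above the Rx allocation.
         		 */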
   4700 		for (i = 0; i < sc->sc_nqueues; i++) {
   4701 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4702 			txq->txq_fifo_head = 0;
   4703 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4704 			txq->txq_fifo_size =
   4705 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4706 			txq->txq_fifo_stall = 0;
   4707 		}
   4708 		break;
   4709 	case WM_T_82571:
   4710 	case WM_T_82572:
   4711 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4712 	case WM_T_80003:
   4713 		sc->sc_pba = PBA_32K;
   4714 		break;
   4715 	case WM_T_82573:
   4716 		sc->sc_pba = PBA_12K;
   4717 		break;
   4718 	case WM_T_82574:
   4719 	case WM_T_82583:
   4720 		sc->sc_pba = PBA_20K;
   4721 		break;
   4722 	case WM_T_82576:
   4723 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4724 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4725 		break;
   4726 	case WM_T_82580:
   4727 	case WM_T_I350:
   4728 	case WM_T_I354:
   4729 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4730 		break;
   4731 	case WM_T_I210:
   4732 	case WM_T_I211:
   4733 		sc->sc_pba = PBA_34K;
   4734 		break;
   4735 	case WM_T_ICH8:
   4736 		/* Workaround for a bit corruption issue in FIFO memory */
   4737 		sc->sc_pba = PBA_8K;
   4738 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4739 		break;
   4740 	case WM_T_ICH9:
   4741 	case WM_T_ICH10:
   4742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4743 		    PBA_14K : PBA_10K;
   4744 		break;
   4745 	case WM_T_PCH:
   4746 	case WM_T_PCH2:	/* XXX 14K? */
   4747 	case WM_T_PCH_LPT:
   4748 	case WM_T_PCH_SPT:
   4749 	case WM_T_PCH_CNP:
   4750 		sc->sc_pba = PBA_26K;
   4751 		break;
   4752 	default:
   4753 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4754 		    PBA_40K : PBA_48K;
   4755 		break;
   4756 	}
   4757 	/*
   4758 	 * Only old or non-multiqueue devices have the PBA register
   4759 	 * XXX Need special handling for 82575.
   4760 	 */
   4761 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4762 	    || (sc->sc_type == WM_T_82575))
   4763 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4764 
   4765 	/* Prevent the PCI-E bus from sticking */
   4766 	if (sc->sc_flags & WM_F_PCIE) {
   4767 		int timeout = 800;
   4768 
   4769 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4771 
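         		/* Poll up to 800 * 100us = 80ms for the master to idle */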
   4772 		while (timeout--) {
   4773 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4774 			    == 0)
   4775 				break;
   4776 			delay(100);
   4777 		}
   4778 		if (timeout == 0)
   4779 			device_printf(sc->sc_dev,
   4780 			    "failed to disable busmastering\n");
   4781 	}
   4782 
   4783 	/* Set the completion timeout for interface */
   4784 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4785 	    || (sc->sc_type == WM_T_82580)
   4786 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4787 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4788 		wm_set_pcie_completion_timeout(sc);
   4789 
   4790 	/* Clear interrupt */
   4791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4792 	if (wm_is_using_msix(sc)) {
   4793 		if (sc->sc_type != WM_T_82574) {
   4794 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4795 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4796 		} else
   4797 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4798 	}
   4799 
   4800 	/* Stop the transmit and receive processes. */
   4801 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4802 	sc->sc_rctl &= ~RCTL_EN;
   4803 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4804 	CSR_WRITE_FLUSH(sc);
   4805 
   4806 	/* XXX set_tbi_sbp_82543() */
   4807 
   4808 	delay(10*1000);
   4809 
   4810 	/* Must acquire the MDIO ownership before MAC reset */
   4811 	switch (sc->sc_type) {
   4812 	case WM_T_82573:
   4813 	case WM_T_82574:
   4814 	case WM_T_82583:
   4815 		error = wm_get_hw_semaphore_82573(sc);
   4816 		break;
   4817 	default:
   4818 		break;
   4819 	}
   4820 
   4821 	/*
   4822 	 * 82541 Errata 29? & 82547 Errata 28?
    4823 	 * See also the description of the PHY_RST bit in the CTRL
    4824 	 * register in 8254x_GBe_SDM.pdf.
   4825 	 */
   4826 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4827 		CSR_WRITE(sc, WMREG_CTRL,
   4828 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4829 		CSR_WRITE_FLUSH(sc);
   4830 		delay(5000);
   4831 	}
   4832 
   4833 	switch (sc->sc_type) {
   4834 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4835 	case WM_T_82541:
   4836 	case WM_T_82541_2:
   4837 	case WM_T_82547:
   4838 	case WM_T_82547_2:
   4839 		/*
   4840 		 * On some chipsets, a reset through a memory-mapped write
   4841 		 * cycle can cause the chip to reset before completing the
   4842 		 * write cycle. This causes major headache that can be avoided
   4843 		 * by issuing the reset via indirect register writes through
   4844 		 * I/O space.
   4845 		 *
   4846 		 * So, if we successfully mapped the I/O BAR at attach time,
   4847 		 * use that. Otherwise, try our luck with a memory-mapped
   4848 		 * reset.
   4849 		 */
   4850 		if (sc->sc_flags & WM_F_IOH_VALID)
   4851 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4852 		else
   4853 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4854 		break;
   4855 	case WM_T_82545_3:
   4856 	case WM_T_82546_3:
   4857 		/* Use the shadow control register on these chips. */
   4858 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4859 		break;
   4860 	case WM_T_80003:
   4861 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4862 		sc->phy.acquire(sc);
   4863 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4864 		sc->phy.release(sc);
   4865 		break;
   4866 	case WM_T_ICH8:
   4867 	case WM_T_ICH9:
   4868 	case WM_T_ICH10:
   4869 	case WM_T_PCH:
   4870 	case WM_T_PCH2:
   4871 	case WM_T_PCH_LPT:
   4872 	case WM_T_PCH_SPT:
   4873 	case WM_T_PCH_CNP:
   4874 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4875 		if (wm_phy_resetisblocked(sc) == false) {
   4876 			/*
   4877 			 * Gate automatic PHY configuration by hardware on
   4878 			 * non-managed 82579
   4879 			 */
   4880 			if ((sc->sc_type == WM_T_PCH2)
   4881 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4882 				== 0))
   4883 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4884 
   4885 			reg |= CTRL_PHY_RESET;
   4886 			phy_reset = 1;
   4887 		} else
   4888 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4889 		sc->phy.acquire(sc);
   4890 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4891 		/* Don't insert a completion barrier while resetting */
   4892 		delay(20*1000);
   4893 		mutex_exit(sc->sc_ich_phymtx);
   4894 		break;
   4895 	case WM_T_82580:
   4896 	case WM_T_I350:
   4897 	case WM_T_I354:
   4898 	case WM_T_I210:
   4899 	case WM_T_I211:
   4900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4901 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4902 			CSR_WRITE_FLUSH(sc);
   4903 		delay(5000);
   4904 		break;
   4905 	case WM_T_82542_2_0:
   4906 	case WM_T_82542_2_1:
   4907 	case WM_T_82543:
   4908 	case WM_T_82540:
   4909 	case WM_T_82545:
   4910 	case WM_T_82546:
   4911 	case WM_T_82571:
   4912 	case WM_T_82572:
   4913 	case WM_T_82573:
   4914 	case WM_T_82574:
   4915 	case WM_T_82575:
   4916 	case WM_T_82576:
   4917 	case WM_T_82583:
   4918 	default:
   4919 		/* Everything else can safely use the documented method. */
   4920 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4921 		break;
   4922 	}
   4923 
   4924 	/* Must release the MDIO ownership after MAC reset */
   4925 	switch (sc->sc_type) {
   4926 	case WM_T_82573:
   4927 	case WM_T_82574:
   4928 	case WM_T_82583:
   4929 		if (error == 0)
   4930 			wm_put_hw_semaphore_82573(sc);
   4931 		break;
   4932 	default:
   4933 		break;
   4934 	}
   4935 
   4936 	/* Set Phy Config Counter to 50msec */
   4937 	if (sc->sc_type == WM_T_PCH2) {
   4938 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4939 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4940 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4941 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4942 	}
   4943 
   4944 	if (phy_reset != 0)
   4945 		wm_get_cfg_done(sc);
   4946 
   4947 	/* Reload EEPROM */
   4948 	switch (sc->sc_type) {
   4949 	case WM_T_82542_2_0:
   4950 	case WM_T_82542_2_1:
   4951 	case WM_T_82543:
   4952 	case WM_T_82544:
   4953 		delay(10);
   4954 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4956 		CSR_WRITE_FLUSH(sc);
   4957 		delay(2000);
   4958 		break;
   4959 	case WM_T_82540:
   4960 	case WM_T_82545:
   4961 	case WM_T_82545_3:
   4962 	case WM_T_82546:
   4963 	case WM_T_82546_3:
   4964 		delay(5*1000);
   4965 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4966 		break;
   4967 	case WM_T_82541:
   4968 	case WM_T_82541_2:
   4969 	case WM_T_82547:
   4970 	case WM_T_82547_2:
   4971 		delay(20000);
   4972 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4973 		break;
   4974 	case WM_T_82571:
   4975 	case WM_T_82572:
   4976 	case WM_T_82573:
   4977 	case WM_T_82574:
   4978 	case WM_T_82583:
   4979 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4980 			delay(10);
   4981 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4982 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4983 			CSR_WRITE_FLUSH(sc);
   4984 		}
   4985 		/* check EECD_EE_AUTORD */
   4986 		wm_get_auto_rd_done(sc);
   4987 		/*
    4988 		 * PHY configuration from the NVM starts just after
    4989 		 * EECD_AUTO_RD is set.
   4990 		 */
   4991 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4992 		    || (sc->sc_type == WM_T_82583))
   4993 			delay(25*1000);
   4994 		break;
   4995 	case WM_T_82575:
   4996 	case WM_T_82576:
   4997 	case WM_T_82580:
   4998 	case WM_T_I350:
   4999 	case WM_T_I354:
   5000 	case WM_T_I210:
   5001 	case WM_T_I211:
   5002 	case WM_T_80003:
   5003 		/* check EECD_EE_AUTORD */
   5004 		wm_get_auto_rd_done(sc);
   5005 		break;
   5006 	case WM_T_ICH8:
   5007 	case WM_T_ICH9:
   5008 	case WM_T_ICH10:
   5009 	case WM_T_PCH:
   5010 	case WM_T_PCH2:
   5011 	case WM_T_PCH_LPT:
   5012 	case WM_T_PCH_SPT:
   5013 	case WM_T_PCH_CNP:
   5014 		break;
   5015 	default:
   5016 		panic("%s: unknown type\n", __func__);
   5017 	}
   5018 
   5019 	/* Check whether EEPROM is present or not */
   5020 	switch (sc->sc_type) {
   5021 	case WM_T_82575:
   5022 	case WM_T_82576:
   5023 	case WM_T_82580:
   5024 	case WM_T_I350:
   5025 	case WM_T_I354:
   5026 	case WM_T_ICH8:
   5027 	case WM_T_ICH9:
   5028 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5029 			/* Not found */
   5030 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5031 			if (sc->sc_type == WM_T_82575)
   5032 				wm_reset_init_script_82575(sc);
   5033 		}
   5034 		break;
   5035 	default:
   5036 		break;
   5037 	}
   5038 
   5039 	if (phy_reset != 0)
   5040 		wm_phy_post_reset(sc);
   5041 
   5042 	if ((sc->sc_type == WM_T_82580)
   5043 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5044 		/* Clear global device reset status bit */
   5045 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5046 	}
   5047 
   5048 	/* Clear any pending interrupt events. */
   5049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5050 	reg = CSR_READ(sc, WMREG_ICR);
   5051 	if (wm_is_using_msix(sc)) {
   5052 		if (sc->sc_type != WM_T_82574) {
   5053 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5054 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5055 		} else
   5056 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5057 	}
   5058 
   5059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5062 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5063 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5064 		reg |= KABGTXD_BGSQLBIAS;
   5065 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5066 	}
   5067 
   5068 	/* Reload sc_ctrl */
   5069 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5070 
   5071 	wm_set_eee(sc);
   5072 
   5073 	/*
    5074 	 * For PCH, this write makes sure that any noise is detected as
    5075 	 * a CRC error and dropped rather than showing up as a bad packet
    5076 	 * to the DMA engine.
   5077 	 */
   5078 	if (sc->sc_type == WM_T_PCH)
   5079 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5080 
   5081 	if (sc->sc_type >= WM_T_82544)
   5082 		CSR_WRITE(sc, WMREG_WUC, 0);
   5083 
   5084 	if (sc->sc_type < WM_T_82575)
   5085 		wm_disable_aspm(sc); /* Workaround for some chips */
   5086 
   5087 	wm_reset_mdicnfg_82580(sc);
   5088 
   5089 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5090 		wm_pll_workaround_i210(sc);
   5091 
   5092 	if (sc->sc_type == WM_T_80003) {
   5093 		/* Default to TRUE to enable the MDIC W/A */
   5094 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5095 
   5096 		rv = wm_kmrn_readreg(sc,
   5097 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5098 		if (rv == 0) {
   5099 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5100 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5101 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5102 			else
   5103 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5104 		}
   5105 	}
   5106 }
   5107 
   5108 /*
   5109  * wm_add_rxbuf:
   5110  *
    5111  *	Add a receive buffer to the indicated descriptor.
   5112  */
   5113 static int
   5114 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5115 {
   5116 	struct wm_softc *sc = rxq->rxq_sc;
   5117 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5118 	struct mbuf *m;
   5119 	int error;
   5120 
   5121 	KASSERT(mutex_owned(rxq->rxq_lock));
   5122 
   5123 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5124 	if (m == NULL)
   5125 		return ENOBUFS;
   5126 
   5127 	MCLGET(m, M_DONTWAIT);
   5128 	if ((m->m_flags & M_EXT) == 0) {
   5129 		m_freem(m);
   5130 		return ENOBUFS;
   5131 	}
   5132 
   5133 	if (rxs->rxs_mbuf != NULL)
   5134 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5135 
   5136 	rxs->rxs_mbuf = m;
   5137 
   5138 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5139 	/*
   5140 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5141 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5142 	 */
   5143 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5144 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5145 	if (error) {
   5146 		/* XXX XXX XXX */
   5147 		aprint_error_dev(sc->sc_dev,
   5148 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5149 		panic("wm_add_rxbuf");
   5150 	}
   5151 
   5152 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5153 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5154 
   5155 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5156 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5157 			wm_init_rxdesc(rxq, idx);
   5158 	} else
   5159 		wm_init_rxdesc(rxq, idx);
   5160 
   5161 	return 0;
   5162 }
   5163 
   5164 /*
   5165  * wm_rxdrain:
   5166  *
   5167  *	Drain the receive queue.
   5168  */
   5169 static void
   5170 wm_rxdrain(struct wm_rxqueue *rxq)
   5171 {
   5172 	struct wm_softc *sc = rxq->rxq_sc;
   5173 	struct wm_rxsoft *rxs;
   5174 	int i;
   5175 
   5176 	KASSERT(mutex_owned(rxq->rxq_lock));
   5177 
   5178 	for (i = 0; i < WM_NRXDESC; i++) {
   5179 		rxs = &rxq->rxq_soft[i];
   5180 		if (rxs->rxs_mbuf != NULL) {
   5181 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5182 			m_freem(rxs->rxs_mbuf);
   5183 			rxs->rxs_mbuf = NULL;
   5184 		}
   5185 	}
   5186 }
   5187 
   5188 /*
    5189  * Set up registers for RSS.
    5190  *
    5191  * XXX No VMDq support yet.
   5192  */
   5193 static void
   5194 wm_init_rss(struct wm_softc *sc)
   5195 {
   5196 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5197 	int i;
   5198 
   5199 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5200 
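         	/* Spread the RETA entries over the queues round-robin */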
   5201 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5202 		unsigned int qid, reta_ent;
   5203 
   5204 		qid  = i % sc->sc_nqueues;
   5205 		switch (sc->sc_type) {
   5206 		case WM_T_82574:
   5207 			reta_ent = __SHIFTIN(qid,
   5208 			    RETA_ENT_QINDEX_MASK_82574);
   5209 			break;
   5210 		case WM_T_82575:
   5211 			reta_ent = __SHIFTIN(qid,
   5212 			    RETA_ENT_QINDEX1_MASK_82575);
   5213 			break;
   5214 		default:
   5215 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5216 			break;
   5217 		}
   5218 
   5219 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5220 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5221 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5222 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5223 	}
   5224 
   5225 	rss_getkey((uint8_t *)rss_key);
   5226 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5227 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5228 
   5229 	if (sc->sc_type == WM_T_82574)
   5230 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5231 	else
   5232 		mrqc = MRQC_ENABLE_RSS_MQ;
   5233 
   5234 	/*
   5235 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5236 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5237 	 */
   5238 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5241 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5242 
   5243 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5244 }
   5245 
   5246 /*
    5247  * Adjust the TX and RX queue numbers which the system actually uses.
    5248  *
    5249  * The numbers are affected by the parameters below:
    5250  *     - The number of hardware queues
   5251  *     - The number of MSI-X vectors (= "nvectors" argument)
   5252  *     - ncpu
   5253  */
   5254 static void
   5255 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5256 {
   5257 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5258 
   5259 	if (nvectors < 2) {
   5260 		sc->sc_nqueues = 1;
   5261 		return;
   5262 	}
   5263 
   5264 	switch (sc->sc_type) {
   5265 	case WM_T_82572:
   5266 		hw_ntxqueues = 2;
   5267 		hw_nrxqueues = 2;
   5268 		break;
   5269 	case WM_T_82574:
   5270 		hw_ntxqueues = 2;
   5271 		hw_nrxqueues = 2;
   5272 		break;
   5273 	case WM_T_82575:
   5274 		hw_ntxqueues = 4;
   5275 		hw_nrxqueues = 4;
   5276 		break;
   5277 	case WM_T_82576:
   5278 		hw_ntxqueues = 16;
   5279 		hw_nrxqueues = 16;
   5280 		break;
   5281 	case WM_T_82580:
   5282 	case WM_T_I350:
   5283 	case WM_T_I354:
   5284 		hw_ntxqueues = 8;
   5285 		hw_nrxqueues = 8;
   5286 		break;
   5287 	case WM_T_I210:
   5288 		hw_ntxqueues = 4;
   5289 		hw_nrxqueues = 4;
   5290 		break;
   5291 	case WM_T_I211:
   5292 		hw_ntxqueues = 2;
   5293 		hw_nrxqueues = 2;
   5294 		break;
   5295 		/*
    5296 		 * As the ethernet controllers below do not support MSI-X,
    5297 		 * this driver does not use multiqueue on them.
   5298 		 *     - WM_T_80003
   5299 		 *     - WM_T_ICH8
   5300 		 *     - WM_T_ICH9
   5301 		 *     - WM_T_ICH10
   5302 		 *     - WM_T_PCH
   5303 		 *     - WM_T_PCH2
   5304 		 *     - WM_T_PCH_LPT
   5305 		 */
   5306 	default:
   5307 		hw_ntxqueues = 1;
   5308 		hw_nrxqueues = 1;
   5309 		break;
   5310 	}
   5311 
   5312 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5313 
   5314 	/*
    5315 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5316 	 * limit the number of queues actually used.
   5317 	 */
   5318 	if (nvectors < hw_nqueues + 1)
   5319 		sc->sc_nqueues = nvectors - 1;
   5320 	else
   5321 		sc->sc_nqueues = hw_nqueues;
   5322 
   5323 	/*
    5324 	 * As more queues than CPUs cannot improve scaling, we limit
    5325 	 * the number of queues actually used.
   5326 	 */
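         	/*
         	 * Net effect: sc_nqueues = min(hw queue pairs, nvectors - 1,
         	 * ncpu).  For example, an 82576 (16 queue pairs) given 5 MSI-X
         	 * vectors on a 2-CPU machine ends up with 2 queues.
         	 */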
   5327 	if (ncpu < sc->sc_nqueues)
   5328 		sc->sc_nqueues = ncpu;
   5329 }
   5330 
   5331 static inline bool
   5332 wm_is_using_msix(struct wm_softc *sc)
   5333 {
   5334 
   5335 	return (sc->sc_nintrs > 1);
   5336 }
   5337 
   5338 static inline bool
   5339 wm_is_using_multiqueue(struct wm_softc *sc)
   5340 {
   5341 
   5342 	return (sc->sc_nqueues > 1);
   5343 }
   5344 
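         /*
          * wm_softint_establish:
          *
          *	Establish the softint handler (wm_handle_queue) for the given
          *	queue.  On failure, also tear down the queue's already
          *	established hardware interrupt.
          */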
   5345 static int
   5346 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5347 {
   5348 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5349 	wmq->wmq_id = qidx;
   5350 	wmq->wmq_intr_idx = intr_idx;
   5351 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5352 #ifdef WM_MPSAFE
   5353 	    | SOFTINT_MPSAFE
   5354 #endif
   5355 	    , wm_handle_queue, wmq);
   5356 	if (wmq->wmq_si != NULL)
   5357 		return 0;
   5358 
   5359 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5360 	    wmq->wmq_id);
   5361 
   5362 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5363 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5364 	return ENOMEM;
   5365 }
   5366 
   5367 /*
    5368  * Both single-interrupt MSI and INTx can use this function.
   5369  */
   5370 static int
   5371 wm_setup_legacy(struct wm_softc *sc)
   5372 {
   5373 	pci_chipset_tag_t pc = sc->sc_pc;
   5374 	const char *intrstr = NULL;
   5375 	char intrbuf[PCI_INTRSTR_LEN];
   5376 	int error;
   5377 
   5378 	error = wm_alloc_txrx_queues(sc);
   5379 	if (error) {
   5380 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5381 		    error);
   5382 		return ENOMEM;
   5383 	}
   5384 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5385 	    sizeof(intrbuf));
   5386 #ifdef WM_MPSAFE
   5387 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5388 #endif
   5389 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5390 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5391 	if (sc->sc_ihs[0] == NULL) {
   5392 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5393 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5394 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5395 		return ENOMEM;
   5396 	}
   5397 
   5398 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5399 	sc->sc_nintrs = 1;
   5400 
   5401 	return wm_softint_establish(sc, 0, 0);
   5402 }
   5403 
   5404 static int
   5405 wm_setup_msix(struct wm_softc *sc)
   5406 {
   5407 	void *vih;
   5408 	kcpuset_t *affinity;
   5409 	int qidx, error, intr_idx, txrx_established;
   5410 	pci_chipset_tag_t pc = sc->sc_pc;
   5411 	const char *intrstr = NULL;
   5412 	char intrbuf[PCI_INTRSTR_LEN];
   5413 	char intr_xname[INTRDEVNAMEBUF];
   5414 
   5415 	if (sc->sc_nqueues < ncpu) {
   5416 		/*
    5417 		 * To avoid conflicts with other devices' interrupts, the
    5418 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   5419 		 */
   5420 		sc->sc_affinity_offset = 1;
   5421 	} else {
   5422 		/*
    5423 		 * In this case, this device uses all CPUs, so for
    5424 		 * readability we match cpu_index to the MSI-X vector number.
   5425 		 */
   5426 		sc->sc_affinity_offset = 0;
   5427 	}
   5428 
   5429 	error = wm_alloc_txrx_queues(sc);
   5430 	if (error) {
   5431 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5432 		    error);
   5433 		return ENOMEM;
   5434 	}
   5435 
   5436 	kcpuset_create(&affinity, false);
   5437 	intr_idx = 0;
   5438 
   5439 	/*
   5440 	 * TX and RX
   5441 	 */
   5442 	txrx_established = 0;
   5443 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5444 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5445 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5446 
   5447 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5448 		    sizeof(intrbuf));
   5449 #ifdef WM_MPSAFE
   5450 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5451 		    PCI_INTR_MPSAFE, true);
   5452 #endif
   5453 		memset(intr_xname, 0, sizeof(intr_xname));
   5454 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5455 		    device_xname(sc->sc_dev), qidx);
   5456 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5457 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5458 		if (vih == NULL) {
   5459 			aprint_error_dev(sc->sc_dev,
   5460 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5461 			    intrstr ? " at " : "",
   5462 			    intrstr ? intrstr : "");
   5463 
   5464 			goto fail;
   5465 		}
   5466 		kcpuset_zero(affinity);
   5467 		/* Round-robin affinity */
   5468 		kcpuset_set(affinity, affinity_to);
   5469 		error = interrupt_distribute(vih, affinity, NULL);
   5470 		if (error == 0) {
   5471 			aprint_normal_dev(sc->sc_dev,
   5472 			    "for TX and RX interrupting at %s affinity to %u\n",
   5473 			    intrstr, affinity_to);
   5474 		} else {
   5475 			aprint_normal_dev(sc->sc_dev,
   5476 			    "for TX and RX interrupting at %s\n", intrstr);
   5477 		}
   5478 		sc->sc_ihs[intr_idx] = vih;
   5479 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5480 			goto fail;
   5481 		txrx_established++;
   5482 		intr_idx++;
   5483 	}
   5484 
   5485 	/* LINK */
   5486 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5487 	    sizeof(intrbuf));
   5488 #ifdef WM_MPSAFE
   5489 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5490 #endif
   5491 	memset(intr_xname, 0, sizeof(intr_xname));
   5492 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5493 	    device_xname(sc->sc_dev));
   5494 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5495 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5496 	if (vih == NULL) {
   5497 		aprint_error_dev(sc->sc_dev,
   5498 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5499 		    intrstr ? " at " : "",
   5500 		    intrstr ? intrstr : "");
   5501 
   5502 		goto fail;
   5503 	}
    5504 	/* Keep the default affinity for the LINK interrupt */
   5505 	aprint_normal_dev(sc->sc_dev,
   5506 	    "for LINK interrupting at %s\n", intrstr);
   5507 	sc->sc_ihs[intr_idx] = vih;
   5508 	sc->sc_link_intr_idx = intr_idx;
   5509 
   5510 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5511 	kcpuset_destroy(affinity);
   5512 	return 0;
   5513 
   5514  fail:
   5515 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5516 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5517 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5518 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5519 	}
   5520 
   5521 	kcpuset_destroy(affinity);
   5522 	return ENOMEM;
   5523 }
   5524 
   5525 static void
   5526 wm_unset_stopping_flags(struct wm_softc *sc)
   5527 {
   5528 	int i;
   5529 
   5530 	KASSERT(WM_CORE_LOCKED(sc));
   5531 
   5532 	/* Must unset stopping flags in ascending order. */
   5533 	for (i = 0; i < sc->sc_nqueues; i++) {
   5534 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5535 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5536 
   5537 		mutex_enter(txq->txq_lock);
   5538 		txq->txq_stopping = false;
   5539 		mutex_exit(txq->txq_lock);
   5540 
   5541 		mutex_enter(rxq->rxq_lock);
   5542 		rxq->rxq_stopping = false;
   5543 		mutex_exit(rxq->rxq_lock);
   5544 	}
   5545 
   5546 	sc->sc_core_stopping = false;
   5547 }
   5548 
   5549 static void
   5550 wm_set_stopping_flags(struct wm_softc *sc)
   5551 {
   5552 	int i;
   5553 
   5554 	KASSERT(WM_CORE_LOCKED(sc));
   5555 
   5556 	sc->sc_core_stopping = true;
   5557 
   5558 	/* Must set stopping flags in ascending order. */
   5559 	for (i = 0; i < sc->sc_nqueues; i++) {
   5560 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5561 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5562 
   5563 		mutex_enter(rxq->rxq_lock);
   5564 		rxq->rxq_stopping = true;
   5565 		mutex_exit(rxq->rxq_lock);
   5566 
   5567 		mutex_enter(txq->txq_lock);
   5568 		txq->txq_stopping = true;
   5569 		mutex_exit(txq->txq_lock);
   5570 	}
   5571 }
   5572 
   5573 /*
    5574  * Write the interrupt interval value to the ITR or EITR register.
   5575  */
   5576 static void
   5577 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5578 {
   5579 
   5580 	if (!wmq->wmq_set_itr)
   5581 		return;
   5582 
   5583 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5584 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5585 
   5586 		/*
    5587 		 * The 82575 doesn't have the CNT_INGR field,
    5588 		 * so overwrite the counter field in software.
   5589 		 */
   5590 		if (sc->sc_type == WM_T_82575)
   5591 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5592 		else
   5593 			eitr |= EITR_CNT_INGR;
   5594 
   5595 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5596 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5597 		/*
    5598 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5599 		 * the multiqueue function with MSI-X.
   5600 		 */
   5601 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5602 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5603 	} else {
   5604 		KASSERT(wmq->wmq_id == 0);
   5605 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5606 	}
   5607 
   5608 	wmq->wmq_set_itr = false;
   5609 }
   5610 
   5611 /*
   5612  * TODO
    5613  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5614  * however, it does not fit wm(4). So, AIM remains disabled until we
    5615  * find an appropriate ITR calculation.
   5616  */
   5617 /*
    5618  * Calculate the interrupt interval value to be written to the
    5619  * register by wm_itrs_writereg(); this function does not write it.
   5620  */
   5621 static void
   5622 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5623 {
   5624 #ifdef NOTYET
   5625 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5626 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5627 	uint32_t avg_size = 0;
   5628 	uint32_t new_itr;
   5629 
   5630 	if (rxq->rxq_packets)
   5631 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5632 	if (txq->txq_packets)
   5633 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5634 
   5635 	if (avg_size == 0) {
   5636 		new_itr = 450; /* restore default value */
   5637 		goto out;
   5638 	}
   5639 
   5640 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5641 	avg_size += 24;
   5642 
   5643 	/* Don't starve jumbo frames */
   5644 	avg_size = uimin(avg_size, 3000);
   5645 
   5646 	/* Give a little boost to mid-size frames */
   5647 	if ((avg_size > 300) && (avg_size < 1200))
   5648 		new_itr = avg_size / 3;
   5649 	else
   5650 		new_itr = avg_size / 2;
   5651 
   5652 out:
   5653 	/*
    5654 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5655 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5656 	 */
   5657 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5658 		new_itr *= 4;
   5659 
   5660 	if (new_itr != wmq->wmq_itr) {
   5661 		wmq->wmq_itr = new_itr;
   5662 		wmq->wmq_set_itr = true;
   5663 	} else
   5664 		wmq->wmq_set_itr = false;
   5665 
   5666 	rxq->rxq_packets = 0;
   5667 	rxq->rxq_bytes = 0;
   5668 	txq->txq_packets = 0;
   5669 	txq->txq_bytes = 0;
   5670 #endif
   5671 }
   5672 
   5673 /*
   5674  * wm_init:		[ifnet interface function]
   5675  *
   5676  *	Initialize the interface.
   5677  */
   5678 static int
   5679 wm_init(struct ifnet *ifp)
   5680 {
   5681 	struct wm_softc *sc = ifp->if_softc;
   5682 	int ret;
   5683 
   5684 	WM_CORE_LOCK(sc);
   5685 	ret = wm_init_locked(ifp);
   5686 	WM_CORE_UNLOCK(sc);
   5687 
   5688 	return ret;
   5689 }
   5690 
   5691 static int
   5692 wm_init_locked(struct ifnet *ifp)
   5693 {
   5694 	struct wm_softc *sc = ifp->if_softc;
   5695 	struct ethercom *ec = &sc->sc_ethercom;
   5696 	int i, j, trynum, error = 0;
   5697 	uint32_t reg;
   5698 
   5699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5700 		device_xname(sc->sc_dev), __func__));
   5701 	KASSERT(WM_CORE_LOCKED(sc));
   5702 
   5703 	/*
    5704 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5705 	 * There is a small but measurable benefit to avoiding the adjustment
   5706 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5707 	 * on such platforms.  One possibility is that the DMA itself is
   5708 	 * slightly more efficient if the front of the entire packet (instead
   5709 	 * of the front of the headers) is aligned.
   5710 	 *
   5711 	 * Note we must always set align_tweak to 0 if we are using
   5712 	 * jumbo frames.
   5713 	 */
   5714 #ifdef __NO_STRICT_ALIGNMENT
   5715 	sc->sc_align_tweak = 0;
   5716 #else
   5717 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5718 		sc->sc_align_tweak = 0;
   5719 	else
   5720 		sc->sc_align_tweak = 2;
   5721 #endif /* __NO_STRICT_ALIGNMENT */
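         	/*
         	 * (With align_tweak = 2, the 14-byte Ethernet header starts at
         	 * offset 2, so the IP header behind it is 4-byte aligned.)
         	 */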
   5722 
   5723 	/* Cancel any pending I/O. */
   5724 	wm_stop_locked(ifp, 0);
   5725 
   5726 	/* Update statistics before reset */
   5727 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5728 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5729 
   5730 	/* PCH_SPT hardware workaround */
   5731 	if (sc->sc_type == WM_T_PCH_SPT)
   5732 		wm_flush_desc_rings(sc);
   5733 
   5734 	/* Reset the chip to a known state. */
   5735 	wm_reset(sc);
   5736 
   5737 	/*
    5738 	 * AMT-based hardware can now take control from firmware.
   5739 	 * Do this after reset.
   5740 	 */
   5741 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5742 		wm_get_hw_control(sc);
   5743 
   5744 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5745 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5746 		wm_legacy_irq_quirk_spt(sc);
   5747 
   5748 	/* Init hardware bits */
   5749 	wm_initialize_hardware_bits(sc);
   5750 
   5751 	/* Reset the PHY. */
   5752 	if (sc->sc_flags & WM_F_HAS_MII)
   5753 		wm_gmii_reset(sc);
   5754 
   5755 	if (sc->sc_type >= WM_T_ICH8) {
   5756 		reg = CSR_READ(sc, WMREG_GCR);
   5757 		/*
    5758 		 * ICH8 no-snoop bits have opposite polarity. Set to snoop by
   5759 		 * default after reset.
   5760 		 */
   5761 		if (sc->sc_type == WM_T_ICH8)
   5762 			reg |= GCR_NO_SNOOP_ALL;
   5763 		else
   5764 			reg &= ~GCR_NO_SNOOP_ALL;
   5765 		CSR_WRITE(sc, WMREG_GCR, reg);
   5766 	}
   5767 	if ((sc->sc_type >= WM_T_ICH8)
   5768 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5769 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5770 
   5771 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5772 		reg |= CTRL_EXT_RO_DIS;
   5773 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5774 	}
   5775 
   5776 	/* Calculate (E)ITR value */
   5777 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5778 		/*
    5779 		 * For NEWQUEUE's EITR (except for the 82575).
    5780 		 * The 82575's EITR should be set to the same throttling value
    5781 		 * as other old controllers' ITR because the interrupt/sec
    5782 		 * calculation is the same: 1,000,000,000 / (N * 256).
    5783 		 *
    5784 		 * The 82574's EITR should be set to the same value as ITR.
    5785 		 *
    5786 		 * For N interrupts/sec, set this value to:
    5787 		 * 1,000,000 / N in contrast to the ITR throttling value.
   5788 		 */
   5789 		sc->sc_itr_init = 450;
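         		/* 450 here means about 1,000,000 / 450 = ~2222 ints/sec */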
   5790 	} else if (sc->sc_type >= WM_T_82543) {
   5791 		/*
   5792 		 * Set up the interrupt throttling register (units of 256ns)
   5793 		 * Note that a footnote in Intel's documentation says this
   5794 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5795 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5796 		 * that that is also true for the 1024ns units of the other
   5797 		 * interrupt-related timer registers -- so, really, we ought
   5798 		 * to divide this value by 4 when the link speed is low.
   5799 		 *
   5800 		 * XXX implement this division at link speed change!
   5801 		 */
   5802 
   5803 		/*
   5804 		 * For N interrupts/sec, set this value to:
   5805 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5806 		 * absolute and packet timer values to this value
   5807 		 * divided by 4 to get "simple timer" behavior.
   5808 		 */
   5809 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
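         		/* That is, 1,000,000,000 / (1500 * 256) = ~2604 */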
   5810 	}
   5811 
   5812 	error = wm_init_txrx_queues(sc);
   5813 	if (error)
   5814 		goto out;
   5815 
   5816 	/* Clear out the VLAN table -- we don't use it (yet). */
   5817 	CSR_WRITE(sc, WMREG_VET, 0);
   5818 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5819 		trynum = 10; /* Due to hw errata */
   5820 	else
   5821 		trynum = 1;
   5822 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5823 		for (j = 0; j < trynum; j++)
   5824 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5825 
   5826 	/*
   5827 	 * Set up flow-control parameters.
   5828 	 *
   5829 	 * XXX Values could probably stand some tuning.
   5830 	 */
   5831 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5832 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5833 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5834 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5835 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5836 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5837 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5838 	}
   5839 
   5840 	sc->sc_fcrtl = FCRTL_DFLT;
   5841 	if (sc->sc_type < WM_T_82543) {
   5842 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5843 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5844 	} else {
   5845 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5846 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5847 	}
   5848 
   5849 	if (sc->sc_type == WM_T_80003)
   5850 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5851 	else
   5852 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5853 
   5854 	/* Writes the control register. */
   5855 	wm_set_vlan(sc);
   5856 
   5857 	if (sc->sc_flags & WM_F_HAS_MII) {
   5858 		uint16_t kmreg;
   5859 
   5860 		switch (sc->sc_type) {
   5861 		case WM_T_80003:
   5862 		case WM_T_ICH8:
   5863 		case WM_T_ICH9:
   5864 		case WM_T_ICH10:
   5865 		case WM_T_PCH:
   5866 		case WM_T_PCH2:
   5867 		case WM_T_PCH_LPT:
   5868 		case WM_T_PCH_SPT:
   5869 		case WM_T_PCH_CNP:
   5870 			/*
    5871 			 * Set the MAC to wait the maximum time between
    5872 			 * iterations and increase the maximum number of
    5873 			 * iterations when polling the PHY; this fixes
    5874 			 * erroneous timeouts at 10Mbps.
   5875 			 */
   5876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5877 			    0xFFFF);
   5878 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5879 			    &kmreg);
   5880 			kmreg |= 0x3F;
   5881 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5882 			    kmreg);
   5883 			break;
   5884 		default:
   5885 			break;
   5886 		}
   5887 
   5888 		if (sc->sc_type == WM_T_80003) {
   5889 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5890 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5892 
    5893 			/* Bypass the RX and TX FIFOs */
   5894 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5895 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5896 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5897 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5898 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5899 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5900 		}
   5901 	}
   5902 #if 0
   5903 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5904 #endif
   5905 
   5906 	/* Set up checksum offload parameters. */
   5907 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5908 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5909 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5910 		reg |= RXCSUM_IPOFL;
   5911 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5912 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5913 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5914 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5915 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5916 
   5917 	/* Set registers about MSI-X */
   5918 	if (wm_is_using_msix(sc)) {
   5919 		uint32_t ivar, qintr_idx;
   5920 		struct wm_queue *wmq;
   5921 		unsigned int qid;
   5922 
   5923 		if (sc->sc_type == WM_T_82575) {
   5924 			/* Interrupt control */
   5925 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5926 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5928 
   5929 			/* TX and RX */
   5930 			for (i = 0; i < sc->sc_nqueues; i++) {
   5931 				wmq = &sc->sc_queue[i];
   5932 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5933 				    EITR_TX_QUEUE(wmq->wmq_id)
   5934 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5935 			}
   5936 			/* Link status */
   5937 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5938 			    EITR_OTHER);
   5939 		} else if (sc->sc_type == WM_T_82574) {
   5940 			/* Interrupt control */
   5941 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5942 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5943 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5944 
   5945 			/*
    5946 			 * Work around an issue with spurious interrupts in
    5947 			 * MSI-X mode. At wm_initialize_hardware_bits(),
    5948 			 * sc_nintrs has not been initialized yet, so
    5949 			 * re-initialize WMREG_RFCTL here.
   5950 			 */
   5951 			reg = CSR_READ(sc, WMREG_RFCTL);
   5952 			reg |= WMREG_RFCTL_ACKDIS;
   5953 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5954 
   5955 			ivar = 0;
   5956 			/* TX and RX */
   5957 			for (i = 0; i < sc->sc_nqueues; i++) {
   5958 				wmq = &sc->sc_queue[i];
   5959 				qid = wmq->wmq_id;
   5960 				qintr_idx = wmq->wmq_intr_idx;
   5961 
   5962 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5963 				    IVAR_TX_MASK_Q_82574(qid));
   5964 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5965 				    IVAR_RX_MASK_Q_82574(qid));
   5966 			}
   5967 			/* Link status */
   5968 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5969 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5970 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5971 		} else {
   5972 			/* Interrupt control */
   5973 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5974 			    | GPIE_EIAME | GPIE_PBA);
   5975 
   5976 			switch (sc->sc_type) {
   5977 			case WM_T_82580:
   5978 			case WM_T_I350:
   5979 			case WM_T_I354:
   5980 			case WM_T_I210:
   5981 			case WM_T_I211:
   5982 				/* TX and RX */
   5983 				for (i = 0; i < sc->sc_nqueues; i++) {
   5984 					wmq = &sc->sc_queue[i];
   5985 					qid = wmq->wmq_id;
   5986 					qintr_idx = wmq->wmq_intr_idx;
   5987 
   5988 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5989 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5990 					ivar |= __SHIFTIN((qintr_idx
   5991 						| IVAR_VALID),
   5992 					    IVAR_TX_MASK_Q(qid));
   5993 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5994 					ivar |= __SHIFTIN((qintr_idx
   5995 						| IVAR_VALID),
   5996 					    IVAR_RX_MASK_Q(qid));
   5997 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5998 				}
   5999 				break;
   6000 			case WM_T_82576:
   6001 				/* TX and RX */
   6002 				for (i = 0; i < sc->sc_nqueues; i++) {
   6003 					wmq = &sc->sc_queue[i];
   6004 					qid = wmq->wmq_id;
   6005 					qintr_idx = wmq->wmq_intr_idx;
   6006 
   6007 					ivar = CSR_READ(sc,
   6008 					    WMREG_IVAR_Q_82576(qid));
   6009 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6010 					ivar |= __SHIFTIN((qintr_idx
   6011 						| IVAR_VALID),
   6012 					    IVAR_TX_MASK_Q_82576(qid));
   6013 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6014 					ivar |= __SHIFTIN((qintr_idx
   6015 						| IVAR_VALID),
   6016 					    IVAR_RX_MASK_Q_82576(qid));
   6017 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6018 					    ivar);
   6019 				}
   6020 				break;
   6021 			default:
   6022 				break;
   6023 			}
   6024 
   6025 			/* Link status */
   6026 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6027 			    IVAR_MISC_OTHER);
   6028 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6029 		}
   6030 
   6031 		if (wm_is_using_multiqueue(sc)) {
   6032 			wm_init_rss(sc);
   6033 
   6034 			/*
    6035 			 * NOTE: Receive Full-Packet Checksum Offload is
    6036 			 * mutually exclusive with multiqueue. However, this
    6037 			 * is not the same as TCP/IP checksum offload, which
    6038 			 * still works.
    6039 			 */
   6040 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6041 			reg |= RXCSUM_PCSD;
   6042 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6043 		}
   6044 	}
   6045 
   6046 	/* Set up the interrupt registers. */
   6047 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6048 
   6049 	if (wm_is_using_msix(sc)) {
   6050 		uint32_t mask;
   6051 		struct wm_queue *wmq;
   6052 
   6053 		switch (sc->sc_type) {
   6054 		case WM_T_82574:
   6055 			mask = 0;
   6056 			for (i = 0; i < sc->sc_nqueues; i++) {
   6057 				wmq = &sc->sc_queue[i];
   6058 				mask |= ICR_TXQ(wmq->wmq_id);
   6059 				mask |= ICR_RXQ(wmq->wmq_id);
   6060 			}
   6061 			mask |= ICR_OTHER;
   6062 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6063 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6064 			break;
   6065 		default:
   6066 			if (sc->sc_type == WM_T_82575) {
   6067 				mask = 0;
   6068 				for (i = 0; i < sc->sc_nqueues; i++) {
   6069 					wmq = &sc->sc_queue[i];
   6070 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6071 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6072 				}
   6073 				mask |= EITR_OTHER;
   6074 			} else {
   6075 				mask = 0;
   6076 				for (i = 0; i < sc->sc_nqueues; i++) {
   6077 					wmq = &sc->sc_queue[i];
   6078 					mask |= 1 << wmq->wmq_intr_idx;
   6079 				}
   6080 				mask |= 1 << sc->sc_link_intr_idx;
   6081 			}
   6082 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6083 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6084 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6085 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6086 			break;
   6087 		}
   6088 	} else {
   6089 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6090 		    ICR_RXO | ICR_RXT0;
   6091 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6092 	}
   6093 
   6094 	/* Set up the inter-packet gap. */
   6095 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6096 
   6097 	if (sc->sc_type >= WM_T_82543) {
   6098 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6099 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6100 			wm_itrs_writereg(sc, wmq);
   6101 		}
   6102 		/*
    6103 		 * Link interrupts occur much less frequently than TX
    6104 		 * and RX interrupts, so we don't tune the
    6105 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6106 		 * if_igb does.
   6107 		 */
   6108 	}
   6109 
   6110 	/* Set the VLAN ethernetype. */
   6111 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6112 
   6113 	/*
   6114 	 * Set up the transmit control register; we start out with
    6115 	 * a collision distance suitable for FDX, but update it when
   6116 	 * we resolve the media type.
   6117 	 */
   6118 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6119 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6120 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6121 	if (sc->sc_type >= WM_T_82571)
   6122 		sc->sc_tctl |= TCTL_MULR;
   6123 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6124 
   6125 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6126 		/* Write TDT after TCTL.EN is set. See the document. */
   6127 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6128 	}
   6129 
   6130 	if (sc->sc_type == WM_T_80003) {
   6131 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6132 		reg &= ~TCTL_EXT_GCEX_MASK;
   6133 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6134 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6135 	}
   6136 
   6137 	/* Set the media. */
   6138 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6139 		goto out;
   6140 
   6141 	/* Configure for OS presence */
   6142 	wm_init_manageability(sc);
   6143 
   6144 	/*
   6145 	 * Set up the receive control register; we actually program the
   6146 	 * register when we set the receive filter. Use multicast address
   6147 	 * offset type 0.
   6148 	 *
   6149 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6150 	 * don't enable that feature.
   6151 	 */
   6152 	sc->sc_mchash_type = 0;
   6153 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6154 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6155 
    6156 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6157 	if (sc->sc_type == WM_T_82574)
   6158 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6159 
   6160 	/*
   6161 	 * The I350 has a bug where it always strips the CRC whether
    6162 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6163 	 */
   6164 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6165 	    || (sc->sc_type == WM_T_I210))
   6166 		sc->sc_rctl |= RCTL_SECRC;
   6167 
   6168 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6169 	    && (ifp->if_mtu > ETHERMTU)) {
   6170 		sc->sc_rctl |= RCTL_LPE;
   6171 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6172 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6173 	}
   6174 
   6175 	if (MCLBYTES == 2048)
   6176 		sc->sc_rctl |= RCTL_2k;
   6177 	else {
   6178 		if (sc->sc_type >= WM_T_82543) {
   6179 			switch (MCLBYTES) {
   6180 			case 4096:
   6181 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6182 				break;
   6183 			case 8192:
   6184 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6185 				break;
   6186 			case 16384:
   6187 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6188 				break;
   6189 			default:
   6190 				panic("wm_init: MCLBYTES %d unsupported",
   6191 				    MCLBYTES);
   6192 				break;
   6193 			}
   6194 		} else
   6195 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6196 	}
   6197 
   6198 	/* Enable ECC */
   6199 	switch (sc->sc_type) {
   6200 	case WM_T_82571:
   6201 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6202 		reg |= PBA_ECC_CORR_EN;
   6203 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6204 		break;
   6205 	case WM_T_PCH_LPT:
   6206 	case WM_T_PCH_SPT:
   6207 	case WM_T_PCH_CNP:
   6208 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6209 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6210 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6211 
   6212 		sc->sc_ctrl |= CTRL_MEHE;
   6213 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6214 		break;
   6215 	default:
   6216 		break;
   6217 	}
   6218 
   6219 	/*
   6220 	 * Set the receive filter.
   6221 	 *
   6222 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6223 	 * the setting of RCTL.EN in wm_set_filter()
   6224 	 */
   6225 	wm_set_filter(sc);
   6226 
	/* On the 82575 and later, set RDT only if RX is enabled. */
   6228 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6229 		int qidx;
   6230 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6231 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
			for (i = 0; i < WM_NRXDESC; i++) {
				mutex_enter(rxq->rxq_lock);
				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
   6238 		}
   6239 	}
   6240 
   6241 	wm_unset_stopping_flags(sc);
   6242 
   6243 	/* Start the one second link check clock. */
   6244 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6245 
   6246 	/* ...all done! */
   6247 	ifp->if_flags |= IFF_RUNNING;
   6248 	ifp->if_flags &= ~IFF_OACTIVE;
   6249 
   6250  out:
   6251 	/* Save last flags for the callback */
   6252 	sc->sc_if_flags = ifp->if_flags;
   6253 	sc->sc_ec_capenable = ec->ec_capenable;
   6254 	if (error)
   6255 		log(LOG_ERR, "%s: interface not running\n",
   6256 		    device_xname(sc->sc_dev));
   6257 	return error;
   6258 }
   6259 
   6260 /*
   6261  * wm_stop:		[ifnet interface function]
   6262  *
   6263  *	Stop transmission on the interface.
   6264  */
   6265 static void
   6266 wm_stop(struct ifnet *ifp, int disable)
   6267 {
   6268 	struct wm_softc *sc = ifp->if_softc;
   6269 
   6270 	WM_CORE_LOCK(sc);
   6271 	wm_stop_locked(ifp, disable);
   6272 	WM_CORE_UNLOCK(sc);
   6273 }
   6274 
   6275 static void
   6276 wm_stop_locked(struct ifnet *ifp, int disable)
   6277 {
   6278 	struct wm_softc *sc = ifp->if_softc;
   6279 	struct wm_txsoft *txs;
   6280 	int i, qidx;
   6281 
   6282 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6283 		device_xname(sc->sc_dev), __func__));
   6284 	KASSERT(WM_CORE_LOCKED(sc));
   6285 
   6286 	wm_set_stopping_flags(sc);
   6287 
   6288 	/* Stop the one second clock. */
   6289 	callout_stop(&sc->sc_tick_ch);
   6290 
   6291 	/* Stop the 82547 Tx FIFO stall check timer. */
   6292 	if (sc->sc_type == WM_T_82547)
   6293 		callout_stop(&sc->sc_txfifo_ch);
   6294 
   6295 	if (sc->sc_flags & WM_F_HAS_MII) {
   6296 		/* Down the MII. */
   6297 		mii_down(&sc->sc_mii);
   6298 	} else {
   6299 #if 0
   6300 		/* Should we clear PHY's status properly? */
   6301 		wm_reset(sc);
   6302 #endif
   6303 	}
   6304 
   6305 	/* Stop the transmit and receive processes. */
   6306 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6307 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6308 	sc->sc_rctl &= ~RCTL_EN;
   6309 
   6310 	/*
   6311 	 * Clear the interrupt mask to ensure the device cannot assert its
   6312 	 * interrupt line.
   6313 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6314 	 * service any currently pending or shared interrupt.
   6315 	 */
   6316 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6317 	sc->sc_icr = 0;
   6318 	if (wm_is_using_msix(sc)) {
   6319 		if (sc->sc_type != WM_T_82574) {
   6320 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6321 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6322 		} else
   6323 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6324 	}
   6325 
   6326 	/* Release any queued transmit buffers. */
   6327 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6328 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6329 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6330 		mutex_enter(txq->txq_lock);
   6331 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6332 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6333 			txs = &txq->txq_soft[i];
   6334 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   6336 				m_freem(txs->txs_mbuf);
   6337 				txs->txs_mbuf = NULL;
   6338 			}
   6339 		}
   6340 		mutex_exit(txq->txq_lock);
   6341 	}
   6342 
   6343 	/* Mark the interface as down and cancel the watchdog timer. */
   6344 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6345 
   6346 	if (disable) {
   6347 		for (i = 0; i < sc->sc_nqueues; i++) {
   6348 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6349 			mutex_enter(rxq->rxq_lock);
   6350 			wm_rxdrain(rxq);
   6351 			mutex_exit(rxq->rxq_lock);
   6352 		}
   6353 	}
   6354 
   6355 #if 0 /* notyet */
   6356 	if (sc->sc_type >= WM_T_82544)
   6357 		CSR_WRITE(sc, WMREG_WUC, 0);
   6358 #endif
   6359 }
   6360 
   6361 static void
   6362 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6363 {
   6364 	struct mbuf *m;
   6365 	int i;
   6366 
   6367 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6368 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6369 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6370 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6371 		    m->m_data, m->m_len, m->m_flags);
   6372 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6373 	    i, i == 1 ? "" : "s");
   6374 }
   6375 
   6376 /*
   6377  * wm_82547_txfifo_stall:
   6378  *
   6379  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6380  *	reset the FIFO pointers, and restart packet transmission.
   6381  */
   6382 static void
   6383 wm_82547_txfifo_stall(void *arg)
   6384 {
   6385 	struct wm_softc *sc = arg;
   6386 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6387 
   6388 	mutex_enter(txq->txq_lock);
   6389 
   6390 	if (txq->txq_stopping)
   6391 		goto out;
   6392 
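	/*
	 * TDH/TDT are the Tx descriptor ring head and tail; TDF[H,T] and
	 * TDF[H,T]S are (presumably) the internal Tx data FIFO head/tail
	 * registers and their saved copies. When each pair matches, both
	 * the ring and the FIFO have drained.
	 */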
   6393 	if (txq->txq_fifo_stall) {
   6394 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6395 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6396 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6397 			/*
   6398 			 * Packets have drained.  Stop transmitter, reset
   6399 			 * FIFO pointers, restart transmitter, and kick
   6400 			 * the packet queue.
   6401 			 */
   6402 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6403 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6404 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6405 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6406 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6407 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6408 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6409 			CSR_WRITE_FLUSH(sc);
   6410 
   6411 			txq->txq_fifo_head = 0;
   6412 			txq->txq_fifo_stall = 0;
   6413 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6414 		} else {
   6415 			/*
   6416 			 * Still waiting for packets to drain; try again in
   6417 			 * another tick.
   6418 			 */
   6419 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6420 		}
   6421 	}
   6422 
   6423 out:
   6424 	mutex_exit(txq->txq_lock);
   6425 }
   6426 
   6427 /*
   6428  * wm_82547_txfifo_bugchk:
   6429  *
   6430  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6431  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6433  *
   6434  *	We do this by checking the amount of space before the end
   6435  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6436  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6437  *	the internal FIFO pointers to the beginning, and restart
   6438  *	transmission on the interface.
   6439  */
   6440 #define	WM_FIFO_HDR		0x10
   6441 #define	WM_82547_PAD_LEN	0x3e0
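/*
 * A worked example with illustrative numbers: a full-size 1514 byte
 * frame gives len = roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO
 * space, so wm_82547_txfifo_bugchk() below stalls the queue once the
 * space remaining before the end of the FIFO drops to
 * len - WM_82547_PAD_LEN (1536 - 0x3e0 = 544) bytes or less.
 */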
   6442 static int
   6443 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6444 {
   6445 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6446 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6447 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6448 
   6449 	/* Just return if already stalled. */
   6450 	if (txq->txq_fifo_stall)
   6451 		return 1;
   6452 
   6453 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6454 		/* Stall only occurs in half-duplex mode. */
   6455 		goto send_packet;
   6456 	}
   6457 
   6458 	if (len >= WM_82547_PAD_LEN + space) {
   6459 		txq->txq_fifo_stall = 1;
   6460 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6461 		return 1;
   6462 	}
   6463 
   6464  send_packet:
   6465 	txq->txq_fifo_head += len;
   6466 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6467 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6468 
   6469 	return 0;
   6470 }
   6471 
   6472 static int
   6473 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6474 {
   6475 	int error;
   6476 
   6477 	/*
   6478 	 * Allocate the control data structures, and create and load the
   6479 	 * DMA map for it.
   6480 	 *
   6481 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6482 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6483 	 * both sets within the same 4G segment.
   6484 	 */
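	/*
	 * The 4G constraint is enforced below by passing 0x100000000ULL
	 * as the boundary argument to bus_dmamem_alloc(9), so the
	 * allocated descriptor memory never crosses a 4GB boundary.
	 */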
   6485 	if (sc->sc_type < WM_T_82544)
   6486 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6487 	else
   6488 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6489 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6490 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6491 	else
   6492 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6493 
   6494 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6495 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6496 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6497 		aprint_error_dev(sc->sc_dev,
   6498 		    "unable to allocate TX control data, error = %d\n",
   6499 		    error);
   6500 		goto fail_0;
   6501 	}
   6502 
   6503 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6504 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6505 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6506 		aprint_error_dev(sc->sc_dev,
   6507 		    "unable to map TX control data, error = %d\n", error);
   6508 		goto fail_1;
   6509 	}
   6510 
   6511 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6512 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6513 		aprint_error_dev(sc->sc_dev,
   6514 		    "unable to create TX control data DMA map, error = %d\n",
   6515 		    error);
   6516 		goto fail_2;
   6517 	}
   6518 
   6519 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6520 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6521 		aprint_error_dev(sc->sc_dev,
   6522 		    "unable to load TX control data DMA map, error = %d\n",
   6523 		    error);
   6524 		goto fail_3;
   6525 	}
   6526 
   6527 	return 0;
   6528 
   6529  fail_3:
   6530 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6531  fail_2:
   6532 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6533 	    WM_TXDESCS_SIZE(txq));
   6534  fail_1:
   6535 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6536  fail_0:
   6537 	return error;
   6538 }
   6539 
   6540 static void
   6541 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6542 {
   6543 
   6544 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6545 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6546 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6547 	    WM_TXDESCS_SIZE(txq));
   6548 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6549 }
   6550 
   6551 static int
   6552 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6553 {
   6554 	int error;
   6555 	size_t rxq_descs_size;
   6556 
   6557 	/*
   6558 	 * Allocate the control data structures, and create and load the
   6559 	 * DMA map for it.
   6560 	 *
   6561 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6562 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6563 	 * both sets within the same 4G segment.
   6564 	 */
   6565 	rxq->rxq_ndesc = WM_NRXDESC;
   6566 	if (sc->sc_type == WM_T_82574)
   6567 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6568 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6569 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6570 	else
   6571 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6572 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6573 
   6574 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6575 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6576 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6577 		aprint_error_dev(sc->sc_dev,
   6578 		    "unable to allocate RX control data, error = %d\n",
   6579 		    error);
   6580 		goto fail_0;
   6581 	}
   6582 
   6583 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6584 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6585 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6586 		aprint_error_dev(sc->sc_dev,
   6587 		    "unable to map RX control data, error = %d\n", error);
   6588 		goto fail_1;
   6589 	}
   6590 
   6591 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6592 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6593 		aprint_error_dev(sc->sc_dev,
   6594 		    "unable to create RX control data DMA map, error = %d\n",
   6595 		    error);
   6596 		goto fail_2;
   6597 	}
   6598 
   6599 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6600 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6601 		aprint_error_dev(sc->sc_dev,
   6602 		    "unable to load RX control data DMA map, error = %d\n",
   6603 		    error);
   6604 		goto fail_3;
   6605 	}
   6606 
   6607 	return 0;
   6608 
   6609  fail_3:
   6610 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6611  fail_2:
   6612 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6613 	    rxq_descs_size);
   6614  fail_1:
   6615 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6616  fail_0:
   6617 	return error;
   6618 }
   6619 
   6620 static void
   6621 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6622 {
   6623 
   6624 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6625 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6626 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6627 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6628 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6629 }
   6630 
   6631 
   6632 static int
   6633 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6634 {
   6635 	int i, error;
   6636 
   6637 	/* Create the transmit buffer DMA maps. */
   6638 	WM_TXQUEUELEN(txq) =
   6639 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6640 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6641 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6642 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6643 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6644 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6645 			aprint_error_dev(sc->sc_dev,
   6646 			    "unable to create Tx DMA map %d, error = %d\n",
   6647 			    i, error);
   6648 			goto fail;
   6649 		}
   6650 	}
   6651 
   6652 	return 0;
   6653 
   6654  fail:
   6655 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6656 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6657 			bus_dmamap_destroy(sc->sc_dmat,
   6658 			    txq->txq_soft[i].txs_dmamap);
   6659 	}
   6660 	return error;
   6661 }
   6662 
   6663 static void
   6664 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6665 {
   6666 	int i;
   6667 
   6668 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6669 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6670 			bus_dmamap_destroy(sc->sc_dmat,
   6671 			    txq->txq_soft[i].txs_dmamap);
   6672 	}
   6673 }
   6674 
   6675 static int
   6676 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6677 {
   6678 	int i, error;
   6679 
   6680 	/* Create the receive buffer DMA maps. */
   6681 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6682 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6683 			    MCLBYTES, 0, 0,
   6684 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6685 			aprint_error_dev(sc->sc_dev,
   6686 			    "unable to create Rx DMA map %d error = %d\n",
   6687 			    i, error);
   6688 			goto fail;
   6689 		}
   6690 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6691 	}
   6692 
   6693 	return 0;
   6694 
   6695  fail:
   6696 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6697 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6698 			bus_dmamap_destroy(sc->sc_dmat,
   6699 			    rxq->rxq_soft[i].rxs_dmamap);
   6700 	}
   6701 	return error;
   6702 }
   6703 
   6704 static void
   6705 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6706 {
   6707 	int i;
   6708 
   6709 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6710 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6711 			bus_dmamap_destroy(sc->sc_dmat,
   6712 			    rxq->rxq_soft[i].rxs_dmamap);
   6713 	}
   6714 }
   6715 
   6716 /*
 * wm_alloc_txrx_queues:
   6718  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6719  */
   6720 static int
   6721 wm_alloc_txrx_queues(struct wm_softc *sc)
   6722 {
   6723 	int i, error, tx_done, rx_done;
   6724 
   6725 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6726 	    KM_SLEEP);
   6727 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate wm_queue\n");
   6729 		error = ENOMEM;
   6730 		goto fail_0;
   6731 	}
   6732 
   6733 	/* For transmission */
   6734 	error = 0;
   6735 	tx_done = 0;
   6736 	for (i = 0; i < sc->sc_nqueues; i++) {
   6737 #ifdef WM_EVENT_COUNTERS
   6738 		int j;
   6739 		const char *xname;
   6740 #endif
   6741 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6742 		txq->txq_sc = sc;
   6743 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6744 
   6745 		error = wm_alloc_tx_descs(sc, txq);
   6746 		if (error)
   6747 			break;
   6748 		error = wm_alloc_tx_buffer(sc, txq);
   6749 		if (error) {
   6750 			wm_free_tx_descs(sc, txq);
   6751 			break;
   6752 		}
   6753 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6754 		if (txq->txq_interq == NULL) {
   6755 			wm_free_tx_descs(sc, txq);
   6756 			wm_free_tx_buffer(sc, txq);
   6757 			error = ENOMEM;
   6758 			break;
   6759 		}
   6760 
   6761 #ifdef WM_EVENT_COUNTERS
   6762 		xname = device_xname(sc->sc_dev);
   6763 
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6767 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6768 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6772 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6773 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6774 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6775 
   6776 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6781 		}
   6782 
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6785 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6786 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6787 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6788 #endif /* WM_EVENT_COUNTERS */
   6789 
   6790 		tx_done++;
   6791 	}
   6792 	if (error)
   6793 		goto fail_1;
   6794 
   6795 	/* For receive */
   6796 	error = 0;
   6797 	rx_done = 0;
   6798 	for (i = 0; i < sc->sc_nqueues; i++) {
   6799 #ifdef WM_EVENT_COUNTERS
   6800 		const char *xname;
   6801 #endif
   6802 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6803 		rxq->rxq_sc = sc;
   6804 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6805 
   6806 		error = wm_alloc_rx_descs(sc, rxq);
   6807 		if (error)
   6808 			break;
   6809 
   6810 		error = wm_alloc_rx_buffer(sc, rxq);
   6811 		if (error) {
   6812 			wm_free_rx_descs(sc, rxq);
   6813 			break;
   6814 		}
   6815 
   6816 #ifdef WM_EVENT_COUNTERS
   6817 		xname = device_xname(sc->sc_dev);
   6818 
   6819 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6820 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6821 
   6822 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6823 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6824 #endif /* WM_EVENT_COUNTERS */
   6825 
   6826 		rx_done++;
   6827 	}
   6828 	if (error)
   6829 		goto fail_2;
   6830 
   6831 	for (i = 0; i < sc->sc_nqueues; i++) {
   6832 		char rndname[16];
   6833 
   6834 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6835 		    device_xname(sc->sc_dev), i);
   6836 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6837 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6838 	}
   6839 
   6840 	return 0;
   6841 
   6842  fail_2:
   6843 	for (i = 0; i < rx_done; i++) {
   6844 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6845 		wm_free_rx_buffer(sc, rxq);
   6846 		wm_free_rx_descs(sc, rxq);
   6847 		if (rxq->rxq_lock)
   6848 			mutex_obj_free(rxq->rxq_lock);
   6849 	}
   6850  fail_1:
   6851 	for (i = 0; i < tx_done; i++) {
   6852 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6853 		pcq_destroy(txq->txq_interq);
   6854 		wm_free_tx_buffer(sc, txq);
   6855 		wm_free_tx_descs(sc, txq);
   6856 		if (txq->txq_lock)
   6857 			mutex_obj_free(txq->txq_lock);
   6858 	}
   6859 
   6860 	kmem_free(sc->sc_queue,
   6861 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6862  fail_0:
   6863 	return error;
   6864 }
   6865 
   6866 /*
 * wm_free_txrx_queues:
   6868  *	Free {tx,rx}descs and {tx,rx} buffers
   6869  */
   6870 static void
   6871 wm_free_txrx_queues(struct wm_softc *sc)
   6872 {
   6873 	int i;
   6874 
   6875 	for (i = 0; i < sc->sc_nqueues; i++)
   6876 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   6877 
   6878 	for (i = 0; i < sc->sc_nqueues; i++) {
   6879 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6880 
   6881 #ifdef WM_EVENT_COUNTERS
   6882 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6883 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6884 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6885 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6886 #endif /* WM_EVENT_COUNTERS */
   6887 
   6888 		wm_free_rx_buffer(sc, rxq);
   6889 		wm_free_rx_descs(sc, rxq);
   6890 		if (rxq->rxq_lock)
   6891 			mutex_obj_free(rxq->rxq_lock);
   6892 	}
   6893 
   6894 	for (i = 0; i < sc->sc_nqueues; i++) {
   6895 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6896 		struct mbuf *m;
   6897 #ifdef WM_EVENT_COUNTERS
   6898 		int j;
   6899 
   6900 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6907 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6908 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6909 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6910 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6911 
   6912 		for (j = 0; j < WM_NTXSEGS; j++)
   6913 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6914 
   6915 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6916 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6917 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6918 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6919 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6920 #endif /* WM_EVENT_COUNTERS */
   6921 
   6922 		/* Drain txq_interq */
   6923 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6924 			m_freem(m);
   6925 		pcq_destroy(txq->txq_interq);
   6926 
   6927 		wm_free_tx_buffer(sc, txq);
   6928 		wm_free_tx_descs(sc, txq);
   6929 		if (txq->txq_lock)
   6930 			mutex_obj_free(txq->txq_lock);
   6931 	}
   6932 
   6933 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6934 }
   6935 
   6936 static void
   6937 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6938 {
   6939 
   6940 	KASSERT(mutex_owned(txq->txq_lock));
   6941 
   6942 	/* Initialize the transmit descriptor ring. */
   6943 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6944 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6945 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6946 	txq->txq_free = WM_NTXDESC(txq);
   6947 	txq->txq_next = 0;
   6948 }
   6949 
   6950 static void
   6951 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6952     struct wm_txqueue *txq)
   6953 {
   6954 
   6955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6956 		device_xname(sc->sc_dev), __func__));
   6957 	KASSERT(mutex_owned(txq->txq_lock));
   6958 
   6959 	if (sc->sc_type < WM_T_82543) {
   6960 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6961 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6962 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6963 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6964 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6965 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6966 	} else {
   6967 		int qid = wmq->wmq_id;
   6968 
   6969 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6970 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6971 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6972 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6973 
   6974 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6975 			/*
   6976 			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet.
   6978 			 */
   6979 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6980 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6981 			    | TXDCTL_WTHRESH(0));
   6982 		else {
   6983 			/* XXX should update with AIM? */
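			/*
			 * The divide by four presumably converts the ITR
			 * interval (256 ns units) to the 1.024 us units of
			 * TIDV/TADV.
			 */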
   6984 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6985 			if (sc->sc_type >= WM_T_82540) {
   6986 				/* Should be the same */
   6987 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6988 			}
   6989 
   6990 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6991 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6992 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6993 		}
   6994 	}
   6995 }
   6996 
   6997 static void
   6998 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6999 {
   7000 	int i;
   7001 
   7002 	KASSERT(mutex_owned(txq->txq_lock));
   7003 
   7004 	/* Initialize the transmit job descriptors. */
   7005 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7006 		txq->txq_soft[i].txs_mbuf = NULL;
   7007 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7008 	txq->txq_snext = 0;
   7009 	txq->txq_sdirty = 0;
   7010 }
   7011 
   7012 static void
   7013 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7014     struct wm_txqueue *txq)
   7015 {
   7016 
   7017 	KASSERT(mutex_owned(txq->txq_lock));
   7018 
   7019 	/*
   7020 	 * Set up some register offsets that are different between
   7021 	 * the i82542 and the i82543 and later chips.
   7022 	 */
   7023 	if (sc->sc_type < WM_T_82543)
   7024 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7025 	else
   7026 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7027 
   7028 	wm_init_tx_descs(sc, txq);
   7029 	wm_init_tx_regs(sc, wmq, txq);
   7030 	wm_init_tx_buffer(sc, txq);
   7031 
   7032 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7033 	txq->txq_sending = false;
   7034 }
   7035 
   7036 static void
   7037 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7038     struct wm_rxqueue *rxq)
   7039 {
   7040 
   7041 	KASSERT(mutex_owned(rxq->rxq_lock));
   7042 
   7043 	/*
   7044 	 * Initialize the receive descriptor and receive job
   7045 	 * descriptor rings.
   7046 	 */
   7047 	if (sc->sc_type < WM_T_82543) {
   7048 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7049 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7050 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7051 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7052 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7053 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7054 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7055 
   7056 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7057 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7058 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7059 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7060 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7061 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7062 	} else {
   7063 		int qid = wmq->wmq_id;
   7064 
   7065 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7066 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7067 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7068 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7069 
   7070 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);

			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
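			/*
			 * The BSIZEPKT field expresses the Rx buffer size
			 * in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
			 * hence the divisibility check on MCLBYTES above.
			 */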
   7077 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7078 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7079 			    | RXDCTL_WTHRESH(1));
   7080 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7081 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7082 		} else {
   7083 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7084 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7085 			/* XXX should update with AIM? */
   7086 			CSR_WRITE(sc, WMREG_RDTR,
   7087 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
   7089 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7090 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7091 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7092 		}
   7093 	}
   7094 }
   7095 
   7096 static int
   7097 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7098 {
   7099 	struct wm_rxsoft *rxs;
   7100 	int error, i;
   7101 
   7102 	KASSERT(mutex_owned(rxq->rxq_lock));
   7103 
   7104 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7105 		rxs = &rxq->rxq_soft[i];
   7106 		if (rxs->rxs_mbuf == NULL) {
   7107 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7108 				log(LOG_ERR, "%s: unable to allocate or map "
   7109 				    "rx buffer %d, error = %d\n",
   7110 				    device_xname(sc->sc_dev), i, error);
   7111 				/*
   7112 				 * XXX Should attempt to run with fewer receive
   7113 				 * XXX buffers instead of just failing.
   7114 				 */
   7115 				wm_rxdrain(rxq);
   7116 				return ENOMEM;
   7117 			}
   7118 		} else {
   7119 			/*
   7120 			 * For 82575 and 82576, the RX descriptors must be
   7121 			 * initialized after the setting of RCTL.EN in
   7122 			 * wm_set_filter()
   7123 			 */
   7124 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7125 				wm_init_rxdesc(rxq, i);
   7126 		}
   7127 	}
   7128 	rxq->rxq_ptr = 0;
   7129 	rxq->rxq_discard = 0;
   7130 	WM_RXCHAIN_RESET(rxq);
   7131 
   7132 	return 0;
   7133 }
   7134 
   7135 static int
   7136 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7137     struct wm_rxqueue *rxq)
   7138 {
   7139 
   7140 	KASSERT(mutex_owned(rxq->rxq_lock));
   7141 
   7142 	/*
   7143 	 * Set up some register offsets that are different between
   7144 	 * the i82542 and the i82543 and later chips.
   7145 	 */
   7146 	if (sc->sc_type < WM_T_82543)
   7147 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7148 	else
   7149 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7150 
   7151 	wm_init_rx_regs(sc, wmq, rxq);
   7152 	return wm_init_rx_buffer(sc, rxq);
   7153 }
   7154 
   7155 /*
 * wm_init_txrx_queues:
   7157  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7158  */
   7159 static int
   7160 wm_init_txrx_queues(struct wm_softc *sc)
   7161 {
   7162 	int i, error = 0;
   7163 
   7164 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7165 		device_xname(sc->sc_dev), __func__));
   7166 
   7167 	for (i = 0; i < sc->sc_nqueues; i++) {
   7168 		struct wm_queue *wmq = &sc->sc_queue[i];
   7169 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7170 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7171 
		/*
		 * TODO
		 * Currently, a constant value is used instead of AIM.
		 * Furthermore, the interrupt interval used for multiqueue
		 * (polling mode) is lower than the default value.
		 * More tuning, and AIM, are required.
		 */
   7179 		if (wm_is_using_multiqueue(sc))
   7180 			wmq->wmq_itr = 50;
   7181 		else
   7182 			wmq->wmq_itr = sc->sc_itr_init;
   7183 		wmq->wmq_set_itr = true;
   7184 
   7185 		mutex_enter(txq->txq_lock);
   7186 		wm_init_tx_queue(sc, wmq, txq);
   7187 		mutex_exit(txq->txq_lock);
   7188 
   7189 		mutex_enter(rxq->rxq_lock);
   7190 		error = wm_init_rx_queue(sc, wmq, rxq);
   7191 		mutex_exit(rxq->rxq_lock);
   7192 		if (error)
   7193 			break;
   7194 	}
   7195 
   7196 	return error;
   7197 }
   7198 
   7199 /*
   7200  * wm_tx_offload:
   7201  *
   7202  *	Set up TCP/IP checksumming parameters for the
   7203  *	specified packet.
   7204  */
   7205 static int
   7206 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7207     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7208 {
   7209 	struct mbuf *m0 = txs->txs_mbuf;
   7210 	struct livengood_tcpip_ctxdesc *t;
   7211 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7212 	uint32_t ipcse;
   7213 	struct ether_header *eh;
   7214 	int offset, iphl;
   7215 	uint8_t fields;
   7216 
   7217 	/*
   7218 	 * XXX It would be nice if the mbuf pkthdr had offset
   7219 	 * fields for the protocol headers.
   7220 	 */
   7221 
   7222 	eh = mtod(m0, struct ether_header *);
   7223 	switch (htons(eh->ether_type)) {
   7224 	case ETHERTYPE_IP:
   7225 	case ETHERTYPE_IPV6:
   7226 		offset = ETHER_HDR_LEN;
   7227 		break;
   7228 
   7229 	case ETHERTYPE_VLAN:
   7230 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7231 		break;
   7232 
   7233 	default:
   7234 		/* Don't support this protocol or encapsulation. */
   7235 		*fieldsp = 0;
   7236 		*cmdp = 0;
   7237 		return 0;
   7238 	}
   7239 
   7240 	if ((m0->m_pkthdr.csum_flags &
   7241 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7242 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7243 	} else
   7244 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7245 
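	/*
	 * IPCSE holds the inclusive offset of the last byte of the IP
	 * header, hence the "- 1" below.
	 */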
   7246 	ipcse = offset + iphl - 1;
   7247 
   7248 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7249 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7250 	seg = 0;
   7251 	fields = 0;
   7252 
   7253 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7254 		int hlen = offset + iphl;
   7255 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7256 
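
		/*
		 * For TSO, seed th_sum with just the pseudo-header checksum
		 * (computed with a zero length) and zero the IP length
		 * field; the chip then fills in the per-segment lengths and
		 * completes the checksum for each segment it emits.
		 */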
   7257 		if (__predict_false(m0->m_len <
   7258 				    (hlen + sizeof(struct tcphdr)))) {
   7259 			/*
   7260 			 * TCP/IP headers are not in the first mbuf; we need
   7261 			 * to do this the slow and painful way. Let's just
   7262 			 * hope this doesn't happen very often.
   7263 			 */
   7264 			struct tcphdr th;
   7265 
   7266 			WM_Q_EVCNT_INCR(txq, tsopain);
   7267 
   7268 			m_copydata(m0, hlen, sizeof(th), &th);
   7269 			if (v4) {
   7270 				struct ip ip;
   7271 
   7272 				m_copydata(m0, offset, sizeof(ip), &ip);
   7273 				ip.ip_len = 0;
   7274 				m_copyback(m0,
   7275 				    offset + offsetof(struct ip, ip_len),
   7276 				    sizeof(ip.ip_len), &ip.ip_len);
   7277 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7278 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7279 			} else {
   7280 				struct ip6_hdr ip6;
   7281 
   7282 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7283 				ip6.ip6_plen = 0;
   7284 				m_copyback(m0,
   7285 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7286 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7287 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7288 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7289 			}
   7290 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7291 			    sizeof(th.th_sum), &th.th_sum);
   7292 
   7293 			hlen += th.th_off << 2;
   7294 		} else {
   7295 			/*
   7296 			 * TCP/IP headers are in the first mbuf; we can do
   7297 			 * this the easy way.
   7298 			 */
   7299 			struct tcphdr *th;
   7300 
   7301 			if (v4) {
   7302 				struct ip *ip =
   7303 				    (void *)(mtod(m0, char *) + offset);
   7304 				th = (void *)(mtod(m0, char *) + hlen);
   7305 
   7306 				ip->ip_len = 0;
   7307 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7308 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7309 			} else {
   7310 				struct ip6_hdr *ip6 =
   7311 				    (void *)(mtod(m0, char *) + offset);
   7312 				th = (void *)(mtod(m0, char *) + hlen);
   7313 
   7314 				ip6->ip6_plen = 0;
   7315 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7316 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7317 			}
   7318 			hlen += th->th_off << 2;
   7319 		}
   7320 
   7321 		if (v4) {
   7322 			WM_Q_EVCNT_INCR(txq, tso);
   7323 			cmdlen |= WTX_TCPIP_CMD_IP;
   7324 		} else {
   7325 			WM_Q_EVCNT_INCR(txq, tso6);
   7326 			ipcse = 0;
   7327 		}
   7328 		cmd |= WTX_TCPIP_CMD_TSE;
   7329 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7330 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7331 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7332 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7333 	}
   7334 
   7335 	/*
   7336 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7337 	 * offload feature, if we load the context descriptor, we
   7338 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7339 	 */
   7340 
   7341 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7342 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7343 	    WTX_TCPIP_IPCSE(ipcse);
   7344 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7345 		WM_Q_EVCNT_INCR(txq, ipsum);
   7346 		fields |= WTX_IXSM;
   7347 	}
   7348 
   7349 	offset += iphl;
   7350 
   7351 	if (m0->m_pkthdr.csum_flags &
   7352 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7353 		WM_Q_EVCNT_INCR(txq, tusum);
   7354 		fields |= WTX_TXSM;
   7355 		tucs = WTX_TCPIP_TUCSS(offset) |
   7356 		    WTX_TCPIP_TUCSO(offset +
   7357 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7358 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7359 	} else if ((m0->m_pkthdr.csum_flags &
   7360 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7361 		WM_Q_EVCNT_INCR(txq, tusum6);
   7362 		fields |= WTX_TXSM;
   7363 		tucs = WTX_TCPIP_TUCSS(offset) |
   7364 		    WTX_TCPIP_TUCSO(offset +
   7365 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7366 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7367 	} else {
   7368 		/* Just initialize it to a valid TCP context. */
   7369 		tucs = WTX_TCPIP_TUCSS(offset) |
   7370 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7371 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7372 	}
   7373 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: the 82574 requires one for every packet
	 * when two descriptor queues are used.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
	 */
   7381 	/* Fill in the context descriptor. */
   7382 	t = (struct livengood_tcpip_ctxdesc *)
   7383 	    &txq->txq_descs[txq->txq_next];
   7384 	t->tcpip_ipcs = htole32(ipcs);
   7385 	t->tcpip_tucs = htole32(tucs);
   7386 	t->tcpip_cmdlen = htole32(cmdlen);
   7387 	t->tcpip_seg = htole32(seg);
   7388 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7389 
   7390 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7391 	txs->txs_ndesc++;
   7392 
   7393 	*cmdp = cmd;
   7394 	*fieldsp = fields;
   7395 
   7396 	return 0;
   7397 }
   7398 
   7399 static inline int
   7400 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7401 {
   7402 	struct wm_softc *sc = ifp->if_softc;
   7403 	u_int cpuid = cpu_index(curcpu());
   7404 
	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
	 */
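	/*
	 * A worked example with hypothetical numbers: with ncpu = 8,
	 * sc_affinity_offset = 2 and sc_nqueues = 4, CPU 1 maps to
	 * ((1 + 8 - 2) % 8) % 4 = 3 and CPU 2 maps to 0, so CPUs are
	 * spread round-robin over the queues starting at the offset.
	 */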
   7410 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7411 }
   7412 
   7413 /*
   7414  * wm_start:		[ifnet interface function]
   7415  *
   7416  *	Start packet transmission on the interface.
   7417  */
   7418 static void
   7419 wm_start(struct ifnet *ifp)
   7420 {
   7421 	struct wm_softc *sc = ifp->if_softc;
   7422 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7423 
   7424 #ifdef WM_MPSAFE
   7425 	KASSERT(if_is_mpsafe(ifp));
   7426 #endif
   7427 	/*
   7428 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7429 	 */
   7430 
   7431 	mutex_enter(txq->txq_lock);
   7432 	if (!txq->txq_stopping)
   7433 		wm_start_locked(ifp);
   7434 	mutex_exit(txq->txq_lock);
   7435 }
   7436 
   7437 static void
   7438 wm_start_locked(struct ifnet *ifp)
   7439 {
   7440 	struct wm_softc *sc = ifp->if_softc;
   7441 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7442 
   7443 	wm_send_common_locked(ifp, txq, false);
   7444 }
   7445 
   7446 static int
   7447 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7448 {
   7449 	int qid;
   7450 	struct wm_softc *sc = ifp->if_softc;
   7451 	struct wm_txqueue *txq;
   7452 
   7453 	qid = wm_select_txqueue(ifp, m);
   7454 	txq = &sc->sc_queue[qid].wmq_txq;
   7455 
   7456 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7457 		m_freem(m);
   7458 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7459 		return ENOBUFS;
   7460 	}
   7461 
   7462 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7463 	ifp->if_obytes += m->m_pkthdr.len;
   7464 	if (m->m_flags & M_MCAST)
   7465 		ifp->if_omcasts++;
   7466 
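	/*
	 * If the lock is already held, the holder is in (or about to run)
	 * the send loop and should pick this packet up from txq_interq,
	 * so it is safe to skip the lock here rather than wait for it.
	 */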
   7467 	if (mutex_tryenter(txq->txq_lock)) {
   7468 		if (!txq->txq_stopping)
   7469 			wm_transmit_locked(ifp, txq);
   7470 		mutex_exit(txq->txq_lock);
   7471 	}
   7472 
   7473 	return 0;
   7474 }
   7475 
   7476 static void
   7477 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7478 {
   7479 
   7480 	wm_send_common_locked(ifp, txq, true);
   7481 }
   7482 
   7483 static void
   7484 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7485     bool is_transmit)
   7486 {
   7487 	struct wm_softc *sc = ifp->if_softc;
   7488 	struct mbuf *m0;
   7489 	struct wm_txsoft *txs;
   7490 	bus_dmamap_t dmamap;
   7491 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7492 	bus_addr_t curaddr;
   7493 	bus_size_t seglen, curlen;
   7494 	uint32_t cksumcmd;
   7495 	uint8_t cksumfields;
   7496 	bool remap = true;
   7497 
   7498 	KASSERT(mutex_owned(txq->txq_lock));
   7499 
   7500 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7501 		return;
   7502 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7503 		return;
   7504 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7505 		return;
   7506 
   7507 	/* Remember the previous number of free descriptors. */
   7508 	ofree = txq->txq_free;
   7509 
   7510 	/*
   7511 	 * Loop through the send queue, setting up transmit descriptors
   7512 	 * until we drain the queue, or use up all available transmit
   7513 	 * descriptors.
   7514 	 */
   7515 	for (;;) {
   7516 		m0 = NULL;
   7517 
   7518 		/* Get a work queue entry. */
   7519 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7520 			wm_txeof(txq, UINT_MAX);
   7521 			if (txq->txq_sfree == 0) {
   7522 				DPRINTF(WM_DEBUG_TX,
   7523 				    ("%s: TX: no free job descriptors\n",
   7524 					device_xname(sc->sc_dev)));
   7525 				WM_Q_EVCNT_INCR(txq, txsstall);
   7526 				break;
   7527 			}
   7528 		}
   7529 
   7530 		/* Grab a packet off the queue. */
   7531 		if (is_transmit)
   7532 			m0 = pcq_get(txq->txq_interq);
   7533 		else
   7534 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7535 		if (m0 == NULL)
   7536 			break;
   7537 
   7538 		DPRINTF(WM_DEBUG_TX,
   7539 		    ("%s: TX: have packet to transmit: %p\n",
   7540 			device_xname(sc->sc_dev), m0));
   7541 
   7542 		txs = &txq->txq_soft[txq->txq_snext];
   7543 		dmamap = txs->txs_dmamap;
   7544 
   7545 		use_tso = (m0->m_pkthdr.csum_flags &
   7546 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7547 
   7548 		/*
   7549 		 * So says the Linux driver:
   7550 		 * The controller does a simple calculation to make sure
   7551 		 * there is enough room in the FIFO before initiating the
   7552 		 * DMA for each buffer. The calc is:
   7553 		 *	4 = ceil(buffer len / MSS)
   7554 		 * To make sure we don't overrun the FIFO, adjust the max
   7555 		 * buffer len if the MSS drops.
   7556 		 */
   7557 		dmamap->dm_maxsegsz =
   7558 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7559 		    ? m0->m_pkthdr.segsz << 2
   7560 		    : WTX_MAX_LEN;
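		/*
		 * E.g. with an MSS of 1448 (hypothetical), the map's
		 * per-segment limit becomes 1448 << 2 = 5792 bytes,
		 * honouring the 4 * MSS rule quoted above.
		 */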
   7561 
   7562 		/*
   7563 		 * Load the DMA map.  If this fails, the packet either
   7564 		 * didn't fit in the allotted number of segments, or we
   7565 		 * were short on resources.  For the too-many-segments
   7566 		 * case, we simply report an error and drop the packet,
   7567 		 * since we can't sanely copy a jumbo packet to a single
   7568 		 * buffer.
   7569 		 */
   7570 retry:
   7571 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7572 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7573 		if (__predict_false(error)) {
   7574 			if (error == EFBIG) {
   7575 				if (remap == true) {
   7576 					struct mbuf *m;
   7577 
   7578 					remap = false;
   7579 					m = m_defrag(m0, M_NOWAIT);
   7580 					if (m != NULL) {
   7581 						WM_Q_EVCNT_INCR(txq, defrag);
   7582 						m0 = m;
   7583 						goto retry;
   7584 					}
   7585 				}
   7586 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7587 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7588 				    "DMA segments, dropping...\n",
   7589 				    device_xname(sc->sc_dev));
   7590 				wm_dump_mbuf_chain(sc, m0);
   7591 				m_freem(m0);
   7592 				continue;
   7593 			}
   7594 			/* Short on resources, just stop for now. */
   7595 			DPRINTF(WM_DEBUG_TX,
   7596 			    ("%s: TX: dmamap load failed: %d\n",
   7597 				device_xname(sc->sc_dev), error));
   7598 			break;
   7599 		}
   7600 
   7601 		segs_needed = dmamap->dm_nsegs;
   7602 		if (use_tso) {
   7603 			/* For sentinel descriptor; see below. */
   7604 			segs_needed++;
   7605 		}
   7606 
   7607 		/*
   7608 		 * Ensure we have enough descriptors free to describe
   7609 		 * the packet. Note, we always reserve one descriptor
   7610 		 * at the end of the ring due to the semantics of the
   7611 		 * TDT register, plus one more in the event we need
   7612 		 * to load offload context.
   7613 		 */
   7614 		if (segs_needed > txq->txq_free - 2) {
   7615 			/*
   7616 			 * Not enough free descriptors to transmit this
   7617 			 * packet.  We haven't committed anything yet,
   7618 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
   7620 			 * layer that there are no more slots left.
   7621 			 */
   7622 			DPRINTF(WM_DEBUG_TX,
   7623 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7624 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7625 				segs_needed, txq->txq_free - 1));
   7626 			if (!is_transmit)
   7627 				ifp->if_flags |= IFF_OACTIVE;
   7628 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7629 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7630 			WM_Q_EVCNT_INCR(txq, txdstall);
   7631 			break;
   7632 		}
   7633 
   7634 		/*
   7635 		 * Check for 82547 Tx FIFO bug. We need to do this
   7636 		 * once we know we can transmit the packet, since we
   7637 		 * do some internal FIFO space accounting here.
   7638 		 */
   7639 		if (sc->sc_type == WM_T_82547 &&
   7640 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7641 			DPRINTF(WM_DEBUG_TX,
   7642 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7643 				device_xname(sc->sc_dev)));
   7644 			if (!is_transmit)
   7645 				ifp->if_flags |= IFF_OACTIVE;
   7646 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7647 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7648 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7649 			break;
   7650 		}
   7651 
   7652 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7653 
   7654 		DPRINTF(WM_DEBUG_TX,
   7655 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7656 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7657 
   7658 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7659 
   7660 		/*
   7661 		 * Store a pointer to the packet so that we can free it
   7662 		 * later.
   7663 		 *
   7664 		 * Initially, we consider the number of descriptors the
   7665 		 * packet uses the number of DMA segments.  This may be
   7666 		 * incremented by 1 if we do checksum offload (a descriptor
   7667 		 * is used to set the checksum context).
   7668 		 */
   7669 		txs->txs_mbuf = m0;
   7670 		txs->txs_firstdesc = txq->txq_next;
   7671 		txs->txs_ndesc = segs_needed;
   7672 
   7673 		/* Set up offload parameters for this packet. */
   7674 		if (m0->m_pkthdr.csum_flags &
   7675 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7676 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7677 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
			    &cksumfields) != 0) {
   7680 				/* Error message already displayed. */
   7681 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7682 				continue;
   7683 			}
   7684 		} else {
   7685 			cksumcmd = 0;
   7686 			cksumfields = 0;
   7687 		}
   7688 
   7689 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7690 
   7691 		/* Sync the DMA map. */
   7692 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7693 		    BUS_DMASYNC_PREWRITE);
   7694 
   7695 		/* Initialize the transmit descriptor. */
   7696 		for (nexttx = txq->txq_next, seg = 0;
   7697 		     seg < dmamap->dm_nsegs; seg++) {
   7698 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7699 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7700 			     seglen != 0;
   7701 			     curaddr += curlen, seglen -= curlen,
   7702 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7703 				curlen = seglen;
   7704 
   7705 				/*
   7706 				 * So says the Linux driver:
   7707 				 * Work around for premature descriptor
   7708 				 * write-backs in TSO mode.  Append a
   7709 				 * 4-byte sentinel descriptor.
   7710 				 */
   7711 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7712 				    curlen > 8)
   7713 					curlen -= 4;
   7714 
   7715 				wm_set_dma_addr(
   7716 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7717 				txq->txq_descs[nexttx].wtx_cmdlen
   7718 				    = htole32(cksumcmd | curlen);
   7719 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7720 				    = 0;
   7721 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7722 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7724 				lasttx = nexttx;
   7725 
   7726 				DPRINTF(WM_DEBUG_TX,
   7727 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7728 					"len %#04zx\n",
   7729 					device_xname(sc->sc_dev), nexttx,
   7730 					(uint64_t)curaddr, curlen));
   7731 			}
   7732 		}
   7733 
   7734 		KASSERT(lasttx != -1);
   7735 
   7736 		/*
   7737 		 * Set up the command byte on the last descriptor of
   7738 		 * the packet. If we're in the interrupt delay window,
   7739 		 * delay the interrupt.
   7740 		 */
   7741 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7742 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7743 
   7744 		/*
   7745 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7746 		 * up the descriptor to encapsulate the packet for us.
   7747 		 *
   7748 		 * This is only valid on the last descriptor of the packet.
   7749 		 */
   7750 		if (vlan_has_tag(m0)) {
   7751 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7752 			    htole32(WTX_CMD_VLE);
   7753 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7754 			    = htole16(vlan_get_tag(m0));
   7755 		}
   7756 
   7757 		txs->txs_lastdesc = lasttx;
   7758 
   7759 		DPRINTF(WM_DEBUG_TX,
   7760 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7761 			device_xname(sc->sc_dev),
   7762 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7763 
   7764 		/* Sync the descriptors we're using. */
   7765 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7766 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7767 
   7768 		/* Give the packet to the chip. */
   7769 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7770 
   7771 		DPRINTF(WM_DEBUG_TX,
   7772 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7773 
   7774 		DPRINTF(WM_DEBUG_TX,
   7775 		    ("%s: TX: finished transmitting packet, job %d\n",
   7776 			device_xname(sc->sc_dev), txq->txq_snext));
   7777 
   7778 		/* Advance the tx pointer. */
   7779 		txq->txq_free -= txs->txs_ndesc;
   7780 		txq->txq_next = nexttx;
   7781 
   7782 		txq->txq_sfree--;
   7783 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7784 
   7785 		/* Pass the packet to any BPF listeners. */
   7786 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7787 	}
   7788 
   7789 	if (m0 != NULL) {
   7790 		if (!is_transmit)
   7791 			ifp->if_flags |= IFF_OACTIVE;
   7792 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7793 		WM_Q_EVCNT_INCR(txq, descdrop);
   7794 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7795 			__func__));
   7796 		m_freem(m0);
   7797 	}
   7798 
   7799 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7800 		/* No more slots; notify upper layer. */
   7801 		if (!is_transmit)
   7802 			ifp->if_flags |= IFF_OACTIVE;
   7803 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7804 	}
   7805 
   7806 	if (txq->txq_free != ofree) {
   7807 		/* Set a watchdog timer in case the chip flakes out. */
   7808 		txq->txq_lastsent = time_uptime;
   7809 		txq->txq_sending = true;
   7810 	}
   7811 }
   7812 
   7813 /*
   7814  * wm_nq_tx_offload:
   7815  *
   7816  *	Set up TCP/IP checksumming parameters for the
   7817  *	specified packet, for NEWQUEUE devices
   7818  */
   7819 static int
   7820 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7821     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7822 {
   7823 	struct mbuf *m0 = txs->txs_mbuf;
   7824 	uint32_t vl_len, mssidx, cmdc;
   7825 	struct ether_header *eh;
   7826 	int offset, iphl;
   7827 
   7828 	/*
   7829 	 * XXX It would be nice if the mbuf pkthdr had offset
   7830 	 * fields for the protocol headers.
   7831 	 */
   7832 	*cmdlenp = 0;
   7833 	*fieldsp = 0;
   7834 
   7835 	eh = mtod(m0, struct ether_header *);
   7836 	switch (htons(eh->ether_type)) {
   7837 	case ETHERTYPE_IP:
   7838 	case ETHERTYPE_IPV6:
   7839 		offset = ETHER_HDR_LEN;
   7840 		break;
   7841 
   7842 	case ETHERTYPE_VLAN:
   7843 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7844 		break;
   7845 
   7846 	default:
   7847 		/* Don't support this protocol or encapsulation. */
   7848 		*do_csum = false;
   7849 		return 0;
   7850 	}
   7851 	*do_csum = true;
   7852 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7853 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7854 
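        	/*
        	 * The context descriptor's vl_len word packs the MAC header
        	 * length, the IP header length and the 802.1Q VLAN tag into a
        	 * single 32bit field; it is built up piece by piece below.
        	 */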
   7855 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7856 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7857 
   7858 	if ((m0->m_pkthdr.csum_flags &
   7859 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7860 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7861 	} else {
   7862 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7863 	}
   7864 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7865 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7866 
   7867 	if (vlan_has_tag(m0)) {
   7868 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7869 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7870 		*cmdlenp |= NQTX_CMD_VLE;
   7871 	}
   7872 
   7873 	mssidx = 0;
   7874 
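        	/*
        	 * For TSO, mssidx packs the TCP MSS and the L4 header length
        	 * into the context descriptor's MSS/IDX word.
        	 */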
   7875 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7876 		int hlen = offset + iphl;
   7877 		int tcp_hlen;
   7878 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7879 
   7880 		if (__predict_false(m0->m_len <
   7881 				    (hlen + sizeof(struct tcphdr)))) {
   7882 			/*
   7883 			 * TCP/IP headers are not in the first mbuf; we need
   7884 			 * to do this the slow and painful way. Let's just
   7885 			 * hope this doesn't happen very often.
   7886 			 */
   7887 			struct tcphdr th;
   7888 
   7889 			WM_Q_EVCNT_INCR(txq, tsopain);
   7890 
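        			/*
        			 * For TSO the controller expects the IP length
        			 * field to be zeroed and the TCP checksum field
        			 * to be seeded with the pseudo-header checksum
        			 * computed without the length, so patch the
        			 * headers up accordingly.
        			 */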
   7891 			m_copydata(m0, hlen, sizeof(th), &th);
   7892 			if (v4) {
   7893 				struct ip ip;
   7894 
   7895 				m_copydata(m0, offset, sizeof(ip), &ip);
   7896 				ip.ip_len = 0;
   7897 				m_copyback(m0,
   7898 				    offset + offsetof(struct ip, ip_len),
   7899 				    sizeof(ip.ip_len), &ip.ip_len);
   7900 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7901 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7902 			} else {
   7903 				struct ip6_hdr ip6;
   7904 
   7905 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7906 				ip6.ip6_plen = 0;
   7907 				m_copyback(m0,
   7908 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7909 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7910 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7911 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7912 			}
   7913 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7914 			    sizeof(th.th_sum), &th.th_sum);
   7915 
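        			/* th_off is in units of 32bit words. */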
   7916 			tcp_hlen = th.th_off << 2;
   7917 		} else {
   7918 			/*
   7919 			 * TCP/IP headers are in the first mbuf; we can do
   7920 			 * this the easy way.
   7921 			 */
   7922 			struct tcphdr *th;
   7923 
   7924 			if (v4) {
   7925 				struct ip *ip =
   7926 				    (void *)(mtod(m0, char *) + offset);
   7927 				th = (void *)(mtod(m0, char *) + hlen);
   7928 
   7929 				ip->ip_len = 0;
   7930 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7931 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7932 			} else {
   7933 				struct ip6_hdr *ip6 =
   7934 				    (void *)(mtod(m0, char *) + offset);
   7935 				th = (void *)(mtod(m0, char *) + hlen);
   7936 
   7937 				ip6->ip6_plen = 0;
   7938 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7939 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7940 			}
   7941 			tcp_hlen = th->th_off << 2;
   7942 		}
   7943 		hlen += tcp_hlen;
   7944 		*cmdlenp |= NQTX_CMD_TSE;
   7945 
   7946 		if (v4) {
   7947 			WM_Q_EVCNT_INCR(txq, tso);
   7948 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7949 		} else {
   7950 			WM_Q_EVCNT_INCR(txq, tso6);
   7951 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7952 		}
   7953 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7954 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7955 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7956 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7957 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7958 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7959 	} else {
   7960 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7961 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7962 	}
   7963 
   7964 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7965 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7966 		cmdc |= NQTXC_CMD_IP4;
   7967 	}
   7968 
   7969 	if (m0->m_pkthdr.csum_flags &
   7970 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7971 		WM_Q_EVCNT_INCR(txq, tusum);
   7972 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7973 			cmdc |= NQTXC_CMD_TCP;
   7974 		else
   7975 			cmdc |= NQTXC_CMD_UDP;
   7976 
   7977 		cmdc |= NQTXC_CMD_IP4;
   7978 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7979 	}
   7980 	if (m0->m_pkthdr.csum_flags &
   7981 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7982 		WM_Q_EVCNT_INCR(txq, tusum6);
   7983 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7984 			cmdc |= NQTXC_CMD_TCP;
   7985 		else
   7986 			cmdc |= NQTXC_CMD_UDP;
   7987 
   7988 		cmdc |= NQTXC_CMD_IP6;
   7989 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7990 	}
   7991 
   7992 	/*
   7993 	 * We don't have to write a context descriptor for every packet on
   7994 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
   7995 	 * I210 and I211. For those controllers it is enough to write one
   7996 	 * context descriptor per Tx queue.
   7997 	 * Writing a context descriptor for every packet is just extra
   7998 	 * overhead, but it does not cause problems.
   7999 	 */
   8000 	/* Fill in the context descriptor. */
   8001 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   8002 	    htole32(vl_len);
   8003 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   8004 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   8005 	    htole32(cmdc);
   8006 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8007 	    htole32(mssidx);
   8008 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8009 	DPRINTF(WM_DEBUG_TX,
   8010 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8011 		txq->txq_next, 0, vl_len));
   8012 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8013 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8014 	txs->txs_ndesc++;
   8015 	return 0;
   8016 }
   8017 
   8018 /*
   8019  * wm_nq_start:		[ifnet interface function]
   8020  *
   8021  *	Start packet transmission on the interface for NEWQUEUE devices
   8022  */
   8023 static void
   8024 wm_nq_start(struct ifnet *ifp)
   8025 {
   8026 	struct wm_softc *sc = ifp->if_softc;
   8027 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8028 
   8029 #ifdef WM_MPSAFE
   8030 	KASSERT(if_is_mpsafe(ifp));
   8031 #endif
   8032 	/*
   8033 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8034 	 */
   8035 
   8036 	mutex_enter(txq->txq_lock);
   8037 	if (!txq->txq_stopping)
   8038 		wm_nq_start_locked(ifp);
   8039 	mutex_exit(txq->txq_lock);
   8040 }
   8041 
   8042 static void
   8043 wm_nq_start_locked(struct ifnet *ifp)
   8044 {
   8045 	struct wm_softc *sc = ifp->if_softc;
   8046 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8047 
   8048 	wm_nq_send_common_locked(ifp, txq, false);
   8049 }
   8050 
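        /*
         * wm_nq_transmit:	[ifnet interface function]
         *
         *	Enqueue a packet and (if possible) start transmission on the
         *	selected queue, for NEWQUEUE devices.
         */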
   8051 static int
   8052 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8053 {
   8054 	int qid;
   8055 	struct wm_softc *sc = ifp->if_softc;
   8056 	struct wm_txqueue *txq;
   8057 
   8058 	qid = wm_select_txqueue(ifp, m);
   8059 	txq = &sc->sc_queue[qid].wmq_txq;
   8060 
   8061 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8062 		m_freem(m);
   8063 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8064 		return ENOBUFS;
   8065 	}
   8066 
   8067 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8068 	ifp->if_obytes += m->m_pkthdr.len;
   8069 	if (m->m_flags & M_MCAST)
   8070 		ifp->if_omcasts++;
   8071 
   8072 	/*
   8073 	 * There are two situations in which this mutex_tryenter() can
   8074 	 * fail at run time:
   8075 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   8076 	 *     (2) contention with the deferred if_start softint
   8077 	 *         (wm_handle_queue())
   8078 	 * In case (1), the last packet enqueued to txq->txq_interq is
   8079 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8080 	 * In case (2), the last packet is likewise dequeued by
   8081 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8082 	 */
   8083 	if (mutex_tryenter(txq->txq_lock)) {
   8084 		if (!txq->txq_stopping)
   8085 			wm_nq_transmit_locked(ifp, txq);
   8086 		mutex_exit(txq->txq_lock);
   8087 	}
   8088 
   8089 	return 0;
   8090 }
   8091 
   8092 static void
   8093 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8094 {
   8095 
   8096 	wm_nq_send_common_locked(ifp, txq, true);
   8097 }
   8098 
   8099 static void
   8100 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8101     bool is_transmit)
   8102 {
   8103 	struct wm_softc *sc = ifp->if_softc;
   8104 	struct mbuf *m0;
   8105 	struct wm_txsoft *txs;
   8106 	bus_dmamap_t dmamap;
   8107 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8108 	bool do_csum, sent;
   8109 	bool remap = true;
   8110 
   8111 	KASSERT(mutex_owned(txq->txq_lock));
   8112 
   8113 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8114 		return;
   8115 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8116 		return;
   8117 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8118 		return;
   8119 
   8120 	sent = false;
   8121 
   8122 	/*
   8123 	 * Loop through the send queue, setting up transmit descriptors
   8124 	 * until we drain the queue, or use up all available transmit
   8125 	 * descriptors.
   8126 	 */
   8127 	for (;;) {
   8128 		m0 = NULL;
   8129 
   8130 		/* Get a work queue entry. */
   8131 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8132 			wm_txeof(txq, UINT_MAX);
   8133 			if (txq->txq_sfree == 0) {
   8134 				DPRINTF(WM_DEBUG_TX,
   8135 				    ("%s: TX: no free job descriptors\n",
   8136 					device_xname(sc->sc_dev)));
   8137 				WM_Q_EVCNT_INCR(txq, txsstall);
   8138 				break;
   8139 			}
   8140 		}
   8141 
   8142 		/* Grab a packet off the queue. */
   8143 		if (is_transmit)
   8144 			m0 = pcq_get(txq->txq_interq);
   8145 		else
   8146 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8147 		if (m0 == NULL)
   8148 			break;
   8149 
   8150 		DPRINTF(WM_DEBUG_TX,
   8151 		    ("%s: TX: have packet to transmit: %p\n",
   8152 		    device_xname(sc->sc_dev), m0));
   8153 
   8154 		txs = &txq->txq_soft[txq->txq_snext];
   8155 		dmamap = txs->txs_dmamap;
   8156 
   8157 		/*
   8158 		 * Load the DMA map.  If this fails, the packet either
   8159 		 * didn't fit in the allotted number of segments, or we
   8160 		 * were short on resources.  For the too-many-segments
   8161 		 * case, we simply report an error and drop the packet,
   8162 		 * since we can't sanely copy a jumbo packet to a single
   8163 		 * buffer.
   8164 		 */
   8165 retry:
   8166 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8167 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8168 		if (__predict_false(error)) {
   8169 			if (error == EFBIG) {
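        				/*
        				 * EFBIG means the packet needed too many
        				 * DMA segments; try m_defrag() once before
        				 * dropping it.
        				 */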
   8170 				if (remap == true) {
   8171 					struct mbuf *m;
   8172 
   8173 					remap = false;
   8174 					m = m_defrag(m0, M_NOWAIT);
   8175 					if (m != NULL) {
   8176 						WM_Q_EVCNT_INCR(txq, defrag);
   8177 						m0 = m;
   8178 						goto retry;
   8179 					}
   8180 				}
   8181 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8182 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8183 				    "DMA segments, dropping...\n",
   8184 				    device_xname(sc->sc_dev));
   8185 				wm_dump_mbuf_chain(sc, m0);
   8186 				m_freem(m0);
   8187 				continue;
   8188 			}
   8189 			/* Short on resources, just stop for now. */
   8190 			DPRINTF(WM_DEBUG_TX,
   8191 			    ("%s: TX: dmamap load failed: %d\n",
   8192 				device_xname(sc->sc_dev), error));
   8193 			break;
   8194 		}
   8195 
   8196 		segs_needed = dmamap->dm_nsegs;
   8197 
   8198 		/*
   8199 		 * Ensure we have enough descriptors free to describe
   8200 		 * the packet. Note, we always reserve one descriptor
   8201 		 * at the end of the ring due to the semantics of the
   8202 		 * TDT register, plus one more in the event we need
   8203 		 * to load offload context.
   8204 		 */
   8205 		if (segs_needed > txq->txq_free - 2) {
   8206 			/*
   8207 			 * Not enough free descriptors to transmit this
   8208 			 * packet.  We haven't committed anything yet,
   8209 			 * so just unload the DMA map, put the packet
   8210 			 * back on the queue, and punt. Notify the upper
   8211 			 * layer that there are no more slots left.
   8212 			 */
   8213 			DPRINTF(WM_DEBUG_TX,
   8214 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8215 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8216 				segs_needed, txq->txq_free - 1));
   8217 			if (!is_transmit)
   8218 				ifp->if_flags |= IFF_OACTIVE;
   8219 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8220 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8221 			WM_Q_EVCNT_INCR(txq, txdstall);
   8222 			break;
   8223 		}
   8224 
   8225 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8226 
   8227 		DPRINTF(WM_DEBUG_TX,
   8228 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8229 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8230 
   8231 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8232 
   8233 		/*
   8234 		 * Store a pointer to the packet so that we can free it
   8235 		 * later.
   8236 		 *
   8237 		 * Initially, we consider the number of descriptors the
   8238 		 * packet uses to be the number of DMA segments.  This may
   8239 		 * be incremented by 1 if we do checksum offload (a
   8240 		 * descriptor is used to set the checksum context).
   8241 		 */
   8242 		txs->txs_mbuf = m0;
   8243 		txs->txs_firstdesc = txq->txq_next;
   8244 		txs->txs_ndesc = segs_needed;
   8245 
   8246 		/* Set up offload parameters for this packet. */
   8247 		uint32_t cmdlen, fields, dcmdlen;
   8248 		if (m0->m_pkthdr.csum_flags &
   8249 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8250 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8251 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8252 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8253 			    &do_csum) != 0) {
   8254 				/* Error message already displayed. */
   8255 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8256 				continue;
   8257 			}
   8258 		} else {
   8259 			do_csum = false;
   8260 			cmdlen = 0;
   8261 			fields = 0;
   8262 		}
   8263 
   8264 		/* Sync the DMA map. */
   8265 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8266 		    BUS_DMASYNC_PREWRITE);
   8267 
   8268 		/* Initialize the first transmit descriptor. */
   8269 		nexttx = txq->txq_next;
   8270 		if (!do_csum) {
   8271 			/* Setup a legacy descriptor */
   8272 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8273 			    dmamap->dm_segs[0].ds_addr);
   8274 			txq->txq_descs[nexttx].wtx_cmdlen =
   8275 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8276 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8277 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8278 			if (vlan_has_tag(m0)) {
   8279 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8280 				    htole32(WTX_CMD_VLE);
   8281 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8282 				    htole16(vlan_get_tag(m0));
   8283 			} else
   8284 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8285 
   8286 			dcmdlen = 0;
   8287 		} else {
   8288 			/* Setup an advanced data descriptor */
   8289 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8290 			    htole64(dmamap->dm_segs[0].ds_addr);
   8291 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8292 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8293 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8294 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8295 			    htole32(fields);
   8296 			DPRINTF(WM_DEBUG_TX,
   8297 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8298 				device_xname(sc->sc_dev), nexttx,
   8299 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8300 			DPRINTF(WM_DEBUG_TX,
   8301 			    ("\t 0x%08x%08x\n", fields,
   8302 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8303 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8304 		}
   8305 
   8306 		lasttx = nexttx;
   8307 		nexttx = WM_NEXTTX(txq, nexttx);
   8308 		/*
   8309 		 * Fill in the next descriptors. The legacy and advanced
   8310 		 * formats are the same here.
   8311 		 */
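        		/*
        		 * dcmdlen ORs the descriptor type bits into each
        		 * remaining segment's cmdlen: zero for legacy
        		 * descriptors, DTYP_D | DEXT for advanced ones.
        		 */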
   8312 		for (seg = 1; seg < dmamap->dm_nsegs;
   8313 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8314 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8315 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8316 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8317 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8318 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8319 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8320 			lasttx = nexttx;
   8321 
   8322 			DPRINTF(WM_DEBUG_TX,
   8323 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8324 				device_xname(sc->sc_dev), nexttx,
   8325 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8326 				dmamap->dm_segs[seg].ds_len));
   8327 		}
   8328 
   8329 		KASSERT(lasttx != -1);
   8330 
   8331 		/*
   8332 		 * Set up the command byte on the last descriptor of
   8333 		 * the packet. If we're in the interrupt delay window,
   8334 		 * delay the interrupt.
   8335 		 */
   8336 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8337 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8338 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8339 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8340 
   8341 		txs->txs_lastdesc = lasttx;
   8342 
   8343 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8344 		    device_xname(sc->sc_dev),
   8345 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8346 
   8347 		/* Sync the descriptors we're using. */
   8348 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8349 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8350 
   8351 		/* Give the packet to the chip. */
   8352 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8353 		sent = true;
   8354 
   8355 		DPRINTF(WM_DEBUG_TX,
   8356 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8357 
   8358 		DPRINTF(WM_DEBUG_TX,
   8359 		    ("%s: TX: finished transmitting packet, job %d\n",
   8360 			device_xname(sc->sc_dev), txq->txq_snext));
   8361 
   8362 		/* Advance the tx pointer. */
   8363 		txq->txq_free -= txs->txs_ndesc;
   8364 		txq->txq_next = nexttx;
   8365 
   8366 		txq->txq_sfree--;
   8367 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8368 
   8369 		/* Pass the packet to any BPF listeners. */
   8370 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8371 	}
   8372 
   8373 	if (m0 != NULL) {
   8374 		if (!is_transmit)
   8375 			ifp->if_flags |= IFF_OACTIVE;
   8376 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8377 		WM_Q_EVCNT_INCR(txq, descdrop);
   8378 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8379 			__func__));
   8380 		m_freem(m0);
   8381 	}
   8382 
   8383 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8384 		/* No more slots; notify upper layer. */
   8385 		if (!is_transmit)
   8386 			ifp->if_flags |= IFF_OACTIVE;
   8387 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8388 	}
   8389 
   8390 	if (sent) {
   8391 		/* Set a watchdog timer in case the chip flakes out. */
   8392 		txq->txq_lastsent = time_uptime;
   8393 		txq->txq_sending = true;
   8394 	}
   8395 }
   8396 
   8397 static void
   8398 wm_deferred_start_locked(struct wm_txqueue *txq)
   8399 {
   8400 	struct wm_softc *sc = txq->txq_sc;
   8401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8402 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8403 	int qid = wmq->wmq_id;
   8404 
   8405 	KASSERT(mutex_owned(txq->txq_lock));
   8406 
   8407 	if (txq->txq_stopping) {
   8408 		mutex_exit(txq->txq_lock);
   8409 		return;
   8410 	}
   8411 
   8412 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8413 		/* XXX Needed for ALTQ or single-CPU systems */
   8414 		if (qid == 0)
   8415 			wm_nq_start_locked(ifp);
   8416 		wm_nq_transmit_locked(ifp, txq);
   8417 	} else {
   8418 		/* XXX Needed for ALTQ or single-CPU systems */
   8419 		if (qid == 0)
   8420 			wm_start_locked(ifp);
   8421 		wm_transmit_locked(ifp, txq);
   8422 	}
   8423 }
   8424 
   8425 /* Interrupt */
   8426 
   8427 /*
   8428  * wm_txeof:
   8429  *
   8430  *	Helper; handle transmit interrupts.
   8431  */
   8432 static bool
   8433 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8434 {
   8435 	struct wm_softc *sc = txq->txq_sc;
   8436 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8437 	struct wm_txsoft *txs;
   8438 	int count = 0;
   8439 	int i;
   8440 	uint8_t status;
   8441 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8442 	bool more = false;
   8443 
   8444 	KASSERT(mutex_owned(txq->txq_lock));
   8445 
   8446 	if (txq->txq_stopping)
   8447 		return false;
   8448 
   8449 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8450 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8451 	if (wmq->wmq_id == 0)
   8452 		ifp->if_flags &= ~IFF_OACTIVE;
   8453 
   8454 	/*
   8455 	 * Go through the Tx list and free mbufs for those
   8456 	 * frames which have been transmitted.
   8457 	 */
   8458 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8459 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8460 		if (limit-- == 0) {
   8461 			more = true;
   8462 			DPRINTF(WM_DEBUG_TX,
   8463 			    ("%s: TX: loop limited, job %d is not processed\n",
   8464 				device_xname(sc->sc_dev), i));
   8465 			break;
   8466 		}
   8467 
   8468 		txs = &txq->txq_soft[i];
   8469 
   8470 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8471 			device_xname(sc->sc_dev), i));
   8472 
   8473 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8474 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8475 
   8476 		status =
   8477 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8478 		if ((status & WTX_ST_DD) == 0) {
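        			/*
        			 * The chip has not finished this job yet;
        			 * re-sync the descriptor and stop here.
        			 */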
   8479 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8480 			    BUS_DMASYNC_PREREAD);
   8481 			break;
   8482 		}
   8483 
   8484 		count++;
   8485 		DPRINTF(WM_DEBUG_TX,
   8486 		    ("%s: TX: job %d done: descs %d..%d\n",
   8487 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8488 		    txs->txs_lastdesc));
   8489 
   8490 		/*
   8491 		 * XXX We should probably be using the statistics
   8492 		 * XXX registers, but I don't know if they exist
   8493 		 * XXX on chips before the i82544.
   8494 		 */
   8495 
   8496 #ifdef WM_EVENT_COUNTERS
   8497 		if (status & WTX_ST_TU)
   8498 			WM_Q_EVCNT_INCR(txq, underrun);
   8499 #endif /* WM_EVENT_COUNTERS */
   8500 
   8501 		/*
   8502 		 * Documents for 82574 and newer say the status field has
   8503 		 * neither an EC (Excessive Collision) nor an LC (Late
   8504 		 * Collision) bit; both are reserved. Refer to the "PCIe GbE
   8505 		 * Controller Open Source Software Developer's Manual", the
   8506 		 * 82574 datasheet and newer ones.
   8507 		 *
   8508 		 * XXX I saw the LC bit set on an I218 even though the media
   8509 		 * was full duplex, so the bit might mean something else
        		 * there (I have no documentation for it).
   8510 		 */
   8511 
   8512 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8513 		    && ((sc->sc_type < WM_T_82574)
   8514 			|| (sc->sc_type == WM_T_80003))) {
   8515 			ifp->if_oerrors++;
   8516 			if (status & WTX_ST_LC)
   8517 				log(LOG_WARNING, "%s: late collision\n",
   8518 				    device_xname(sc->sc_dev));
   8519 			else if (status & WTX_ST_EC) {
   8520 				ifp->if_collisions +=
   8521 				    TX_COLLISION_THRESHOLD + 1;
   8522 				log(LOG_WARNING, "%s: excessive collisions\n",
   8523 				    device_xname(sc->sc_dev));
   8524 			}
   8525 		} else
   8526 			ifp->if_opackets++;
   8527 
   8528 		txq->txq_packets++;
   8529 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8530 
   8531 		txq->txq_free += txs->txs_ndesc;
   8532 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8533 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8534 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8535 		m_freem(txs->txs_mbuf);
   8536 		txs->txs_mbuf = NULL;
   8537 	}
   8538 
   8539 	/* Update the dirty transmit buffer pointer. */
   8540 	txq->txq_sdirty = i;
   8541 	DPRINTF(WM_DEBUG_TX,
   8542 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8543 
   8544 	/*
   8545 	 * If there are no more pending transmissions, cancel the watchdog
   8546 	 * timer.
   8547 	 */
   8548 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8549 		txq->txq_sending = false;
   8550 
   8551 	return more;
   8552 }
   8553 
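        /*
         * The wm_rxdesc_* accessors below hide the three Rx descriptor
         * layouts this driver deals with: the 82574's extended descriptors,
         * the NEWQUEUE advanced descriptors and the legacy descriptors used
         * by everything else.
         */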
   8554 static inline uint32_t
   8555 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8556 {
   8557 	struct wm_softc *sc = rxq->rxq_sc;
   8558 
   8559 	if (sc->sc_type == WM_T_82574)
   8560 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8561 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8562 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8563 	else
   8564 		return rxq->rxq_descs[idx].wrx_status;
   8565 }
   8566 
   8567 static inline uint32_t
   8568 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8569 {
   8570 	struct wm_softc *sc = rxq->rxq_sc;
   8571 
   8572 	if (sc->sc_type == WM_T_82574)
   8573 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8574 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8575 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8576 	else
   8577 		return rxq->rxq_descs[idx].wrx_errors;
   8578 }
   8579 
   8580 static inline uint16_t
   8581 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8582 {
   8583 	struct wm_softc *sc = rxq->rxq_sc;
   8584 
   8585 	if (sc->sc_type == WM_T_82574)
   8586 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8587 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8588 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8589 	else
   8590 		return rxq->rxq_descs[idx].wrx_special;
   8591 }
   8592 
   8593 static inline int
   8594 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8595 {
   8596 	struct wm_softc *sc = rxq->rxq_sc;
   8597 
   8598 	if (sc->sc_type == WM_T_82574)
   8599 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8600 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8601 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8602 	else
   8603 		return rxq->rxq_descs[idx].wrx_len;
   8604 }
   8605 
   8606 #ifdef WM_DEBUG
   8607 static inline uint32_t
   8608 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8609 {
   8610 	struct wm_softc *sc = rxq->rxq_sc;
   8611 
   8612 	if (sc->sc_type == WM_T_82574)
   8613 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8614 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8615 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8616 	else
   8617 		return 0;
   8618 }
   8619 
   8620 static inline uint8_t
   8621 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8622 {
   8623 	struct wm_softc *sc = rxq->rxq_sc;
   8624 
   8625 	if (sc->sc_type == WM_T_82574)
   8626 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8627 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8628 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8629 	else
   8630 		return 0;
   8631 }
   8632 #endif /* WM_DEBUG */
   8633 
   8634 static inline bool
   8635 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8636     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8637 {
   8638 
   8639 	if (sc->sc_type == WM_T_82574)
   8640 		return (status & ext_bit) != 0;
   8641 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8642 		return (status & nq_bit) != 0;
   8643 	else
   8644 		return (status & legacy_bit) != 0;
   8645 }
   8646 
   8647 static inline bool
   8648 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8649     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8650 {
   8651 
   8652 	if (sc->sc_type == WM_T_82574)
   8653 		return (error & ext_bit) != 0;
   8654 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8655 		return (error & nq_bit) != 0;
   8656 	else
   8657 		return (error & legacy_bit) != 0;
   8658 }
   8659 
   8660 static inline bool
   8661 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8662 {
   8663 
   8664 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8665 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8666 		return true;
   8667 	else
   8668 		return false;
   8669 }
   8670 
   8671 static inline bool
   8672 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8673 {
   8674 	struct wm_softc *sc = rxq->rxq_sc;
   8675 
   8676 	/* XXX missing error bit for newqueue? */
   8677 	if (wm_rxdesc_is_set_error(sc, errors,
   8678 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8679 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8680 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8681 		NQRXC_ERROR_RXE)) {
   8682 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8683 		    EXTRXC_ERROR_SE, 0))
   8684 			log(LOG_WARNING, "%s: symbol error\n",
   8685 			    device_xname(sc->sc_dev));
   8686 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8687 		    EXTRXC_ERROR_SEQ, 0))
   8688 			log(LOG_WARNING, "%s: receive sequence error\n",
   8689 			    device_xname(sc->sc_dev));
   8690 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8691 		    EXTRXC_ERROR_CE, 0))
   8692 			log(LOG_WARNING, "%s: CRC error\n",
   8693 			    device_xname(sc->sc_dev));
   8694 		return true;
   8695 	}
   8696 
   8697 	return false;
   8698 }
   8699 
   8700 static inline bool
   8701 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8702 {
   8703 	struct wm_softc *sc = rxq->rxq_sc;
   8704 
   8705 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8706 		NQRXC_STATUS_DD)) {
   8707 		/* We have processed all of the receive descriptors. */
   8708 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8709 		return false;
   8710 	}
   8711 
   8712 	return true;
   8713 }
   8714 
   8715 static inline bool
   8716 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8717     uint16_t vlantag, struct mbuf *m)
   8718 {
   8719 
   8720 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8721 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8722 		vlan_set_tag(m, le16toh(vlantag));
   8723 	}
   8724 
   8725 	return true;
   8726 }
   8727 
   8728 static inline void
   8729 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8730     uint32_t errors, struct mbuf *m)
   8731 {
   8732 	struct wm_softc *sc = rxq->rxq_sc;
   8733 
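        	/* If IXSM is set, the checksum indications must be ignored. */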
   8734 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8735 		if (wm_rxdesc_is_set_status(sc, status,
   8736 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8737 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8738 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8739 			if (wm_rxdesc_is_set_error(sc, errors,
   8740 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8741 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8742 		}
   8743 		if (wm_rxdesc_is_set_status(sc, status,
   8744 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8745 			/*
   8746 			 * Note: we don't know if this was TCP or UDP,
   8747 			 * so we just set both bits, and expect the
   8748 			 * upper layers to deal.
   8749 			 */
   8750 			WM_Q_EVCNT_INCR(rxq, tusum);
   8751 			m->m_pkthdr.csum_flags |=
   8752 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8753 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8754 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8755 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8756 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8757 		}
   8758 	}
   8759 }
   8760 
   8761 /*
   8762  * wm_rxeof:
   8763  *
   8764  *	Helper; handle receive interrupts.
   8765  */
   8766 static bool
   8767 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8768 {
   8769 	struct wm_softc *sc = rxq->rxq_sc;
   8770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8771 	struct wm_rxsoft *rxs;
   8772 	struct mbuf *m;
   8773 	int i, len;
   8774 	int count = 0;
   8775 	uint32_t status, errors;
   8776 	uint16_t vlantag;
   8777 	bool more = false;
   8778 
   8779 	KASSERT(mutex_owned(rxq->rxq_lock));
   8780 
   8781 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8782 		if (limit-- == 0) {
   8783 			rxq->rxq_ptr = i;
   8784 			more = true;
   8785 			DPRINTF(WM_DEBUG_RX,
   8786 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8787 				device_xname(sc->sc_dev), i));
   8788 			break;
   8789 		}
   8790 
   8791 		rxs = &rxq->rxq_soft[i];
   8792 
   8793 		DPRINTF(WM_DEBUG_RX,
   8794 		    ("%s: RX: checking descriptor %d\n",
   8795 			device_xname(sc->sc_dev), i));
   8796 		wm_cdrxsync(rxq, i,
   8797 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8798 
   8799 		status = wm_rxdesc_get_status(rxq, i);
   8800 		errors = wm_rxdesc_get_errors(rxq, i);
   8801 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8802 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8803 #ifdef WM_DEBUG
   8804 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8805 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8806 #endif
   8807 
   8808 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8809 			/*
   8810 			 * Update the receive pointer while still holding
   8811 			 * rxq_lock, keeping it consistent with the counter.
   8812 			 */
   8813 			rxq->rxq_ptr = i;
   8814 			break;
   8815 		}
   8816 
   8817 		count++;
   8818 		if (__predict_false(rxq->rxq_discard)) {
   8819 			DPRINTF(WM_DEBUG_RX,
   8820 			    ("%s: RX: discarding contents of descriptor %d\n",
   8821 				device_xname(sc->sc_dev), i));
   8822 			wm_init_rxdesc(rxq, i);
   8823 			if (wm_rxdesc_is_eop(rxq, status)) {
   8824 				/* Reset our state. */
   8825 				DPRINTF(WM_DEBUG_RX,
   8826 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8827 					device_xname(sc->sc_dev)));
   8828 				rxq->rxq_discard = 0;
   8829 			}
   8830 			continue;
   8831 		}
   8832 
   8833 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8834 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8835 
   8836 		m = rxs->rxs_mbuf;
   8837 
   8838 		/*
   8839 		 * Add a new receive buffer to the ring, unless of
   8840 		 * course the length is zero. Treat the latter as a
   8841 		 * failed mapping.
   8842 		 */
   8843 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8844 			/*
   8845 			 * Failed, throw away what we've done so
   8846 			 * far, and discard the rest of the packet.
   8847 			 */
   8848 			ifp->if_ierrors++;
   8849 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8850 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8851 			wm_init_rxdesc(rxq, i);
   8852 			if (!wm_rxdesc_is_eop(rxq, status))
   8853 				rxq->rxq_discard = 1;
   8854 			if (rxq->rxq_head != NULL)
   8855 				m_freem(rxq->rxq_head);
   8856 			WM_RXCHAIN_RESET(rxq);
   8857 			DPRINTF(WM_DEBUG_RX,
   8858 			    ("%s: RX: Rx buffer allocation failed, "
   8859 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8860 				rxq->rxq_discard ? " (discard)" : ""));
   8861 			continue;
   8862 		}
   8863 
   8864 		m->m_len = len;
   8865 		rxq->rxq_len += len;
   8866 		DPRINTF(WM_DEBUG_RX,
   8867 		    ("%s: RX: buffer at %p len %d\n",
   8868 			device_xname(sc->sc_dev), m->m_data, len));
   8869 
   8870 		/* If this is not the end of the packet, keep looking. */
   8871 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8872 			WM_RXCHAIN_LINK(rxq, m);
   8873 			DPRINTF(WM_DEBUG_RX,
   8874 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8875 				device_xname(sc->sc_dev), rxq->rxq_len));
   8876 			continue;
   8877 		}
   8878 
   8879 		/*
   8880 		 * Okay, we have the entire packet now. The chip is
   8881 		 * configured to include the FCS except on I350, I354 and
   8882 		 * I21[01] (not all chips can be configured to strip it),
   8883 		 * so we need to trim it ourselves. We may also need to
   8884 		 * adjust the length of the previous mbuf in the chain if
   8885 		 * the current mbuf is too short. Due to an erratum, the
   8886 		 * RCTL_SECRC bit in the RCTL register is always set on
   8887 		 * I350, so we don't trim the FCS there.
   8888 		 */
   8889 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8890 		    && (sc->sc_type != WM_T_I210)
   8891 		    && (sc->sc_type != WM_T_I211)) {
   8892 			if (m->m_len < ETHER_CRC_LEN) {
   8893 				rxq->rxq_tail->m_len
   8894 				    -= (ETHER_CRC_LEN - m->m_len);
   8895 				m->m_len = 0;
   8896 			} else
   8897 				m->m_len -= ETHER_CRC_LEN;
   8898 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8899 		} else
   8900 			len = rxq->rxq_len;
   8901 
   8902 		WM_RXCHAIN_LINK(rxq, m);
   8903 
   8904 		*rxq->rxq_tailp = NULL;
   8905 		m = rxq->rxq_head;
   8906 
   8907 		WM_RXCHAIN_RESET(rxq);
   8908 
   8909 		DPRINTF(WM_DEBUG_RX,
   8910 		    ("%s: RX: have entire packet, len -> %d\n",
   8911 			device_xname(sc->sc_dev), len));
   8912 
   8913 		/* If an error occurred, update stats and drop the packet. */
   8914 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8915 			m_freem(m);
   8916 			continue;
   8917 		}
   8918 
   8919 		/* No errors.  Receive the packet. */
   8920 		m_set_rcvif(m, ifp);
   8921 		m->m_pkthdr.len = len;
   8922 		/*
   8923 		 * TODO
   8924 		 * We should save the rsshash and rsstype in this mbuf.
   8925 		 */
   8926 		DPRINTF(WM_DEBUG_RX,
   8927 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8928 			device_xname(sc->sc_dev), rsstype, rsshash));
   8929 
   8930 		/*
   8931 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8932 		 * for us.  Associate the tag with the packet.
   8933 		 */
   8934 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8935 			continue;
   8936 
   8937 		/* Set up checksum info for this packet. */
   8938 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8939 		/*
   8940 		 * Update the receive pointer while still holding rxq_lock,
   8941 		 * keeping it consistent with the counters.
   8942 		 */
   8943 		rxq->rxq_ptr = i;
   8944 		rxq->rxq_packets++;
   8945 		rxq->rxq_bytes += len;
   8946 		mutex_exit(rxq->rxq_lock);
   8947 
   8948 		/* Pass it on. */
   8949 		if_percpuq_enqueue(sc->sc_ipq, m);
   8950 
   8951 		mutex_enter(rxq->rxq_lock);
   8952 
   8953 		if (rxq->rxq_stopping)
   8954 			break;
   8955 	}
   8956 
   8957 	DPRINTF(WM_DEBUG_RX,
   8958 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8959 
   8960 	return more;
   8961 }
   8962 
   8963 /*
   8964  * wm_linkintr_gmii:
   8965  *
   8966  *	Helper; handle link interrupts for GMII.
   8967  */
   8968 static void
   8969 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8970 {
   8971 	device_t dev = sc->sc_dev;
   8972 	uint32_t status, reg;
   8973 	bool link;
   8974 	int rv;
   8975 
   8976 	KASSERT(WM_CORE_LOCKED(sc));
   8977 
   8978 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8979 		__func__));
   8980 
   8981 	if ((icr & ICR_LSC) == 0) {
   8982 		if (icr & ICR_RXSEQ)
   8983 			DPRINTF(WM_DEBUG_LINK,
   8984 			    ("%s: LINK Receive sequence error\n",
   8985 				device_xname(dev)));
   8986 		return;
   8987 	}
   8988 
   8989 	/* Link status changed */
   8990 	status = CSR_READ(sc, WMREG_STATUS);
   8991 	link = status & STATUS_LU;
   8992 	if (link) {
   8993 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8994 			device_xname(dev),
   8995 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8996 	} else {
   8997 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8998 			device_xname(dev)));
   8999 	}
   9000 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9001 		wm_gig_downshift_workaround_ich8lan(sc);
   9002 
   9003 	if ((sc->sc_type == WM_T_ICH8)
   9004 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9005 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9006 	}
   9007 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9008 		device_xname(dev)));
   9009 	mii_pollstat(&sc->sc_mii);
   9010 	if (sc->sc_type == WM_T_82543) {
   9011 		int miistatus, active;
   9012 
   9013 		/*
   9014 		 * With 82543, we need to force speed and
   9015 		 * duplex on the MAC equal to what the PHY
   9016 		 * speed and duplex configuration is.
   9017 		 */
   9018 		miistatus = sc->sc_mii.mii_media_status;
   9019 
   9020 		if (miistatus & IFM_ACTIVE) {
   9021 			active = sc->sc_mii.mii_media_active;
   9022 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9023 			switch (IFM_SUBTYPE(active)) {
   9024 			case IFM_10_T:
   9025 				sc->sc_ctrl |= CTRL_SPEED_10;
   9026 				break;
   9027 			case IFM_100_TX:
   9028 				sc->sc_ctrl |= CTRL_SPEED_100;
   9029 				break;
   9030 			case IFM_1000_T:
   9031 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9032 				break;
   9033 			default:
   9034 				/*
   9035 				 * Fiber?
   9036 				 * Should not enter here.
   9037 				 */
   9038 				device_printf(dev, "unknown media (%x)\n",
   9039 				    active);
   9040 				break;
   9041 			}
   9042 			if (active & IFM_FDX)
   9043 				sc->sc_ctrl |= CTRL_FD;
   9044 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9045 		}
   9046 	} else if (sc->sc_type == WM_T_PCH) {
   9047 		wm_k1_gig_workaround_hv(sc,
   9048 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9049 	}
   9050 
   9051 	/*
   9052 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9053 	 * aggressive resulting in many collisions. To avoid this, increase
   9054 	 * the IPG and reduce Rx latency in the PHY.
   9055 	 */
   9056 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9057 	    && link) {
   9058 		uint32_t tipg_reg;
   9059 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9060 		bool fdx;
   9061 		uint16_t emi_addr, emi_val;
   9062 
   9063 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9064 		tipg_reg &= ~TIPG_IPGT_MASK;
   9065 		fdx = status & STATUS_FD;
   9066 
   9067 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9068 			tipg_reg |= 0xff;
   9069 			/* Reduce Rx latency in analog PHY */
   9070 			emi_val = 0;
   9071 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9072 		    fdx && speed != STATUS_SPEED_1000) {
   9073 			tipg_reg |= 0xc;
   9074 			emi_val = 1;
   9075 		} else {
   9076 			/* Roll back the default values */
   9077 			tipg_reg |= 0x08;
   9078 			emi_val = 1;
   9079 		}
   9080 
   9081 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9082 
   9083 		rv = sc->phy.acquire(sc);
   9084 		if (rv)
   9085 			return;
   9086 
   9087 		if (sc->sc_type == WM_T_PCH2)
   9088 			emi_addr = I82579_RX_CONFIG;
   9089 		else
   9090 			emi_addr = I217_RX_CONFIG;
   9091 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9092 
   9093 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9094 			uint16_t phy_reg;
   9095 
   9096 			sc->phy.readreg_locked(dev, 2,
   9097 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9098 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9099 			if (speed == STATUS_SPEED_100
   9100 			    || speed == STATUS_SPEED_10)
   9101 				phy_reg |= 0x3e8;
   9102 			else
   9103 				phy_reg |= 0xfa;
   9104 			sc->phy.writereg_locked(dev, 2,
   9105 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9106 
   9107 			if (speed == STATUS_SPEED_1000) {
   9108 				sc->phy.readreg_locked(dev, 2,
   9109 				    HV_PM_CTRL, &phy_reg);
   9110 
   9111 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9112 
   9113 				sc->phy.writereg_locked(dev, 2,
   9114 				    HV_PM_CTRL, phy_reg);
   9115 			}
   9116 		}
   9117 		sc->phy.release(sc);
   9118 
   9119 		if (rv)
   9120 			return;
   9121 
   9122 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9123 			uint16_t data, ptr_gap;
   9124 
   9125 			if (speed == STATUS_SPEED_1000) {
   9126 				rv = sc->phy.acquire(sc);
   9127 				if (rv)
   9128 					return;
   9129 
   9130 				rv = sc->phy.readreg_locked(dev, 2,
   9131 				    I219_UNKNOWN1, &data);
   9132 				if (rv) {
   9133 					sc->phy.release(sc);
   9134 					return;
   9135 				}
   9136 
   9137 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9138 				if (ptr_gap < 0x18) {
   9139 					data &= ~(0x3ff << 2);
   9140 					data |= (0x18 << 2);
   9141 					rv = sc->phy.writereg_locked(dev,
   9142 					    2, I219_UNKNOWN1, data);
   9143 				}
   9144 				sc->phy.release(sc);
   9145 				if (rv)
   9146 					return;
   9147 			} else {
   9148 				rv = sc->phy.acquire(sc);
   9149 				if (rv)
   9150 					return;
   9151 
   9152 				rv = sc->phy.writereg_locked(dev, 2,
   9153 				    I219_UNKNOWN1, 0xc023);
   9154 				sc->phy.release(sc);
   9155 				if (rv)
   9156 					return;
   9157 
   9158 			}
   9159 		}
   9160 	}
   9161 
   9162 	/*
   9163 	 * I217 packet loss issue:
   9164 	 * Ensure that the FEXTNVM4 beacon duration is set correctly
   9165 	 * on power-up.
   9166 	 * Set the beacon duration for I217 to 8 usec.
   9167 	 */
   9168 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9169 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9170 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9171 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9172 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9173 	}
   9174 
   9175 	/* Work-around I218 hang issue */
   9176 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9177 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9178 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9179 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9180 		wm_k1_workaround_lpt_lp(sc, link);
   9181 
   9182 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9183 		/*
   9184 		 * Set platform power management values for Latency
   9185 		 * Tolerance Reporting (LTR)
   9186 		 */
   9187 		wm_platform_pm_pch_lpt(sc,
   9188 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9189 	}
   9190 
   9191 	/* Clear link partner's EEE ability */
   9192 	sc->eee_lp_ability = 0;
   9193 
   9194 	/* FEXTNVM6 K1-off workaround */
   9195 	if (sc->sc_type == WM_T_PCH_SPT) {
   9196 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9197 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9198 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9199 		else
   9200 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9201 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9202 	}
   9203 
   9204 	if (!link)
   9205 		return;
   9206 
   9207 	switch (sc->sc_type) {
   9208 	case WM_T_PCH2:
   9209 		wm_k1_workaround_lv(sc);
   9210 		/* FALLTHROUGH */
   9211 	case WM_T_PCH:
   9212 		if (sc->sc_phytype == WMPHY_82578)
   9213 			wm_link_stall_workaround_hv(sc);
   9214 		break;
   9215 	default:
   9216 		break;
   9217 	}
   9218 
   9219 	/* Enable/Disable EEE after link up */
   9220 	if (sc->sc_phytype > WMPHY_82579)
   9221 		wm_set_eee_pchlan(sc);
   9222 }
   9223 
   9224 /*
   9225  * wm_linkintr_tbi:
   9226  *
   9227  *	Helper; handle link interrupts for TBI mode.
   9228  */
   9229 static void
   9230 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9231 {
   9232 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9233 	uint32_t status;
   9234 
   9235 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9236 		__func__));
   9237 
   9238 	status = CSR_READ(sc, WMREG_STATUS);
   9239 	if (icr & ICR_LSC) {
   9240 		wm_check_for_link(sc);
   9241 		if (status & STATUS_LU) {
   9242 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9243 				device_xname(sc->sc_dev),
   9244 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9245 			/*
   9246 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9247 			 * so we should update sc->sc_ctrl
   9248 			 */
   9249 
   9250 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9251 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9252 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9253 			if (status & STATUS_FD)
   9254 				sc->sc_tctl |=
   9255 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9256 			else
   9257 				sc->sc_tctl |=
   9258 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9259 			if (sc->sc_ctrl & CTRL_TFCE)
   9260 				sc->sc_fcrtl |= FCRTL_XONE;
   9261 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9262 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9263 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9264 			sc->sc_tbi_linkup = 1;
   9265 			if_link_state_change(ifp, LINK_STATE_UP);
   9266 		} else {
   9267 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9268 				device_xname(sc->sc_dev)));
   9269 			sc->sc_tbi_linkup = 0;
   9270 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9271 		}
   9272 		/* Update LED */
   9273 		wm_tbi_serdes_set_linkled(sc);
   9274 	} else if (icr & ICR_RXSEQ)
   9275 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9276 			device_xname(sc->sc_dev)));
   9277 }
   9278 
   9279 /*
   9280  * wm_linkintr_serdes:
   9281  *
   9282  *	Helper; handle link interrupts for SERDES mode.
   9283  */
   9284 static void
   9285 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9286 {
   9287 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9288 	struct mii_data *mii = &sc->sc_mii;
   9289 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9290 	uint32_t pcs_adv, pcs_lpab, reg;
   9291 
   9292 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9293 		__func__));
   9294 
   9295 	if (icr & ICR_LSC) {
   9296 		/* Check PCS */
   9297 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9298 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9299 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9300 				device_xname(sc->sc_dev)));
   9301 			mii->mii_media_status |= IFM_ACTIVE;
   9302 			sc->sc_tbi_linkup = 1;
   9303 			if_link_state_change(ifp, LINK_STATE_UP);
   9304 		} else {
   9305 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9306 				device_xname(sc->sc_dev)));
   9307 			mii->mii_media_status |= IFM_NONE;
   9308 			sc->sc_tbi_linkup = 0;
   9309 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9310 			wm_tbi_serdes_set_linkled(sc);
   9311 			return;
   9312 		}
   9313 		mii->mii_media_active |= IFM_1000_SX;
   9314 		if ((reg & PCS_LSTS_FDX) != 0)
   9315 			mii->mii_media_active |= IFM_FDX;
   9316 		else
   9317 			mii->mii_media_active |= IFM_HDX;
   9318 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9319 			/* Check flow */
   9320 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9321 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9322 				DPRINTF(WM_DEBUG_LINK,
   9323 				    ("XXX LINKOK but not ACOMP\n"));
   9324 				return;
   9325 			}
   9326 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9327 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9328 			DPRINTF(WM_DEBUG_LINK,
   9329 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9330 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9331 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9332 				mii->mii_media_active |= IFM_FLOW
   9333 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9334 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9335 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9336 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9337 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9338 				mii->mii_media_active |= IFM_FLOW
   9339 				    | IFM_ETH_TXPAUSE;
   9340 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9341 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9342 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9343 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9344 				mii->mii_media_active |= IFM_FLOW
   9345 				    | IFM_ETH_RXPAUSE;
   9346 		}
   9347 		/* Update LED */
   9348 		wm_tbi_serdes_set_linkled(sc);
   9349 	} else
   9350 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9351 		    device_xname(sc->sc_dev)));
   9352 }
   9353 
   9354 /*
   9355  * wm_linkintr:
   9356  *
   9357  *	Helper; handle link interrupts.
   9358  */
   9359 static void
   9360 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9361 {
   9362 
   9363 	KASSERT(WM_CORE_LOCKED(sc));
   9364 
   9365 	if (sc->sc_flags & WM_F_HAS_MII)
   9366 		wm_linkintr_gmii(sc, icr);
   9367 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9368 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9369 		wm_linkintr_serdes(sc, icr);
   9370 	else
   9371 		wm_linkintr_tbi(sc, icr);
   9372 }
   9373 
   9374 /*
   9375  * wm_intr_legacy:
   9376  *
   9377  *	Interrupt service routine for INTx and MSI.
   9378  */
   9379 static int
   9380 wm_intr_legacy(void *arg)
   9381 {
   9382 	struct wm_softc *sc = arg;
   9383 	struct wm_queue *wmq = &sc->sc_queue[0];
   9384 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9385 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9386 	uint32_t icr, rndval = 0;
   9387 	int handled = 0;
   9388 
   9389 	while (1 /* CONSTCOND */) {
   9390 		icr = CSR_READ(sc, WMREG_ICR);
   9391 		if ((icr & sc->sc_icr) == 0)
   9392 			break;
   9393 		if (handled == 0)
   9394 			DPRINTF(WM_DEBUG_TX,
   9395 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9396 		if (rndval == 0)
   9397 			rndval = icr;
   9398 
   9399 		mutex_enter(rxq->rxq_lock);
   9400 
   9401 		if (rxq->rxq_stopping) {
   9402 			mutex_exit(rxq->rxq_lock);
   9403 			break;
   9404 		}
   9405 
   9406 		handled = 1;
   9407 
   9408 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9409 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9410 			DPRINTF(WM_DEBUG_RX,
   9411 			    ("%s: RX: got Rx intr 0x%08x\n",
   9412 				device_xname(sc->sc_dev),
   9413 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9414 			WM_Q_EVCNT_INCR(rxq, intr);
   9415 		}
   9416 #endif
   9417 		/*
   9418 		 * wm_rxeof() does *not* call upper layer functions directly,
   9419 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9420 		 * So, we can call wm_rxeof() in interrupt context.
   9421 		 */
   9422 		wm_rxeof(rxq, UINT_MAX);
   9423 		/* Fill lower bits with RX index. See below for the upper. */
   9424 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9425 
   9426 		mutex_exit(rxq->rxq_lock);
   9427 		mutex_enter(txq->txq_lock);
   9428 
   9429 		if (txq->txq_stopping) {
   9430 			mutex_exit(txq->txq_lock);
   9431 			break;
   9432 		}
   9433 
   9434 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9435 		if (icr & ICR_TXDW) {
   9436 			DPRINTF(WM_DEBUG_TX,
   9437 			    ("%s: TX: got TXDW interrupt\n",
   9438 				device_xname(sc->sc_dev)));
   9439 			WM_Q_EVCNT_INCR(txq, txdw);
   9440 		}
   9441 #endif
   9442 		wm_txeof(txq, UINT_MAX);
   9443 		/* Fill upper bits with TX index. See above for the lower. */
   9444 		rndval |= txq->txq_next * WM_NRXDESC;
   9445 
   9446 		mutex_exit(txq->txq_lock);
   9447 		WM_CORE_LOCK(sc);
   9448 
   9449 		if (sc->sc_core_stopping) {
   9450 			WM_CORE_UNLOCK(sc);
   9451 			break;
   9452 		}
   9453 
   9454 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9455 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9456 			wm_linkintr(sc, icr);
   9457 		}
   9458 
   9459 		WM_CORE_UNLOCK(sc);
   9460 
   9461 		if (icr & ICR_RXO) {
   9462 #if defined(WM_DEBUG)
   9463 			log(LOG_WARNING, "%s: Receive overrun\n",
   9464 			    device_xname(sc->sc_dev));
   9465 #endif /* defined(WM_DEBUG) */
   9466 		}
   9467 	}
   9468 
   9469 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9470 
   9471 	if (handled) {
   9472 		/* Try to get more packets going. */
   9473 		softint_schedule(wmq->wmq_si);
   9474 	}
   9475 
   9476 	return handled;
   9477 }
   9478 
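        /*
         * Mask this queue's Tx/Rx interrupts: the 82574 uses per-queue ICR
         * bits in IMC/IMS, the 82575 uses EITR_{TX,RX}_QUEUE bits and newer
         * MSI-X devices use one EIMC/EIMS bit per vector.
         */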
   9479 static inline void
   9480 wm_txrxintr_disable(struct wm_queue *wmq)
   9481 {
   9482 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9483 
   9484 	if (sc->sc_type == WM_T_82574)
   9485 		CSR_WRITE(sc, WMREG_IMC,
   9486 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9487 	else if (sc->sc_type == WM_T_82575)
   9488 		CSR_WRITE(sc, WMREG_EIMC,
   9489 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9490 	else
   9491 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9492 }
   9493 
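/*
 * wm_txrxintr_enable:
 *
 *	Re-enable the Tx/Rx interrupts of the given queue after
 *	recalculating the interrupt throttling rate.
 */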
   9494 static inline void
   9495 wm_txrxintr_enable(struct wm_queue *wmq)
   9496 {
   9497 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9498 
   9499 	wm_itrs_calculate(sc, wmq);
   9500 
   9501 	/*
    9502 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9503 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
    9504 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9505 	 * while wm_handle_queue(wmq) is running.
   9506 	 */
   9507 	if (sc->sc_type == WM_T_82574)
   9508 		CSR_WRITE(sc, WMREG_IMS,
   9509 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9510 	else if (sc->sc_type == WM_T_82575)
   9511 		CSR_WRITE(sc, WMREG_EIMS,
   9512 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9513 	else
   9514 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9515 }
   9516 
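/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx MSI-X vector of a queue.
 *	The queue's interrupts stay masked until the work is drained,
 *	either here or in the wm_handle_queue() softint.
 */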
   9517 static int
   9518 wm_txrxintr_msix(void *arg)
   9519 {
   9520 	struct wm_queue *wmq = arg;
   9521 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9522 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9523 	struct wm_softc *sc = txq->txq_sc;
   9524 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9525 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9526 	uint32_t rndval = 0;
   9527 	bool txmore;
   9528 	bool rxmore;
   9529 
   9530 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9531 
   9532 	DPRINTF(WM_DEBUG_TX,
   9533 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9534 
   9535 	wm_txrxintr_disable(wmq);
   9536 
   9537 	mutex_enter(txq->txq_lock);
   9538 
   9539 	if (txq->txq_stopping) {
   9540 		mutex_exit(txq->txq_lock);
   9541 		return 0;
   9542 	}
   9543 
   9544 	WM_Q_EVCNT_INCR(txq, txdw);
   9545 	txmore = wm_txeof(txq, txlimit);
   9546 	/* Fill upper bits with TX index. See below for the lower. */
   9547 	rndval = txq->txq_next * WM_NRXDESC;
   9548 	/* wm_deferred start() is done in wm_handle_queue(). */
   9549 	mutex_exit(txq->txq_lock);
   9550 
   9551 	DPRINTF(WM_DEBUG_RX,
   9552 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9553 	mutex_enter(rxq->rxq_lock);
   9554 
   9555 	if (rxq->rxq_stopping) {
   9556 		mutex_exit(rxq->rxq_lock);
   9557 		return 0;
   9558 	}
   9559 
   9560 	WM_Q_EVCNT_INCR(rxq, intr);
   9561 	rxmore = wm_rxeof(rxq, rxlimit);
   9562 
   9563 	/* Fill lower bits with RX index. See above for the upper. */
   9564 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9565 	mutex_exit(rxq->rxq_lock);
   9566 
   9567 	wm_itrs_writereg(sc, wmq);
   9568 
   9569 	/*
    9570 	 * This function is called in hardware interrupt context and each
    9571 	 * queue's interrupt is bound to a single CPU, so no lock is required.
   9572 	 */
   9573 	if (rndval != 0)
   9574 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9575 
   9576 	if (txmore || rxmore)
   9577 		softint_schedule(wmq->wmq_si);
   9578 	else
   9579 		wm_txrxintr_enable(wmq);
   9580 
   9581 	return 1;
   9582 }
   9583 
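/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred Tx/Rx processing of a queue. It
 *	reschedules itself while more work remains within the process
 *	limits, and re-enables the queue's interrupts when done.
 */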
   9584 static void
   9585 wm_handle_queue(void *arg)
   9586 {
   9587 	struct wm_queue *wmq = arg;
   9588 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9589 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9590 	struct wm_softc *sc = txq->txq_sc;
   9591 	u_int txlimit = sc->sc_tx_process_limit;
   9592 	u_int rxlimit = sc->sc_rx_process_limit;
   9593 	bool txmore;
   9594 	bool rxmore;
   9595 
   9596 	mutex_enter(txq->txq_lock);
   9597 	if (txq->txq_stopping) {
   9598 		mutex_exit(txq->txq_lock);
   9599 		return;
   9600 	}
   9601 	txmore = wm_txeof(txq, txlimit);
   9602 	wm_deferred_start_locked(txq);
   9603 	mutex_exit(txq->txq_lock);
   9604 
   9605 	mutex_enter(rxq->rxq_lock);
   9606 	if (rxq->rxq_stopping) {
   9607 		mutex_exit(rxq->rxq_lock);
   9608 		return;
   9609 	}
   9610 	WM_Q_EVCNT_INCR(rxq, defer);
   9611 	rxmore = wm_rxeof(rxq, rxlimit);
   9612 	mutex_exit(rxq->rxq_lock);
   9613 
   9614 	if (txmore || rxmore)
   9615 		softint_schedule(wmq->wmq_si);
   9616 	else
   9617 		wm_txrxintr_enable(wmq);
   9618 }
   9619 
   9620 /*
   9621  * wm_linkintr_msix:
   9622  *
   9623  *	Interrupt service routine for link status change for MSI-X.
   9624  */
   9625 static int
   9626 wm_linkintr_msix(void *arg)
   9627 {
   9628 	struct wm_softc *sc = arg;
   9629 	uint32_t reg;
    9630 	bool has_rxo = false;
   9631 
   9632 	reg = CSR_READ(sc, WMREG_ICR);
   9633 	WM_CORE_LOCK(sc);
   9634 	DPRINTF(WM_DEBUG_LINK,
   9635 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9636 		device_xname(sc->sc_dev), reg));
   9637 
   9638 	if (sc->sc_core_stopping)
   9639 		goto out;
   9640 
   9641 	if ((reg & ICR_LSC) != 0) {
   9642 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9643 		wm_linkintr(sc, ICR_LSC);
   9644 	}
   9645 
   9646 	/*
   9647 	 * XXX 82574 MSI-X mode workaround
   9648 	 *
    9649 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9650 	 * MSI-X vector, and it raises neither the ICR_RXQ(0) nor the
    9651 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9652 	 * interrupts by writing WMREG_ICS to process received packets.
   9653 	 */
   9654 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9655 #if defined(WM_DEBUG)
   9656 		log(LOG_WARNING, "%s: Receive overrun\n",
   9657 		    device_xname(sc->sc_dev));
   9658 #endif /* defined(WM_DEBUG) */
   9659 
   9660 		has_rxo = true;
   9661 		/*
    9662 		 * The RXO interrupt fires at a very high rate when receive
    9663 		 * traffic is heavy. We use polling mode for ICR_OTHER, as we
    9664 		 * do for the Tx/Rx interrupts. ICR_OTHER is re-enabled at the
    9665 		 * end of wm_txrxintr_msix(), which is kicked by both the
    9666 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9667 		 */
   9668 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9669 
   9670 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9671 	}
    9672 
   9675 out:
   9676 	WM_CORE_UNLOCK(sc);
   9677 
   9678 	if (sc->sc_type == WM_T_82574) {
   9679 		if (!has_rxo)
   9680 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9681 		else
   9682 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9683 	} else if (sc->sc_type == WM_T_82575)
   9684 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9685 	else
   9686 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9687 
   9688 	return 1;
   9689 }
   9690 
   9691 /*
   9692  * Media related.
   9693  * GMII, SGMII, TBI (and SERDES)
   9694  */
   9695 
   9696 /* Common */
   9697 
   9698 /*
   9699  * wm_tbi_serdes_set_linkled:
   9700  *
   9701  *	Update the link LED on TBI and SERDES devices.
   9702  */
   9703 static void
   9704 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9705 {
   9706 
   9707 	if (sc->sc_tbi_linkup)
   9708 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9709 	else
   9710 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9711 
   9712 	/* 82540 or newer devices are active low */
   9713 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9714 
   9715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9716 }
   9717 
   9718 /* GMII related */
   9719 
   9720 /*
   9721  * wm_gmii_reset:
   9722  *
   9723  *	Reset the PHY.
   9724  */
   9725 static void
   9726 wm_gmii_reset(struct wm_softc *sc)
   9727 {
   9728 	uint32_t reg;
   9729 	int rv;
   9730 
   9731 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9732 		device_xname(sc->sc_dev), __func__));
   9733 
   9734 	rv = sc->phy.acquire(sc);
   9735 	if (rv != 0) {
   9736 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9737 		    __func__);
   9738 		return;
   9739 	}
   9740 
   9741 	switch (sc->sc_type) {
   9742 	case WM_T_82542_2_0:
   9743 	case WM_T_82542_2_1:
   9744 		/* null */
   9745 		break;
   9746 	case WM_T_82543:
   9747 		/*
   9748 		 * With 82543, we need to force speed and duplex on the MAC
   9749 		 * equal to what the PHY speed and duplex configuration is.
   9750 		 * In addition, we need to perform a hardware reset on the PHY
   9751 		 * to take it out of reset.
   9752 		 */
   9753 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9754 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9755 
   9756 		/* The PHY reset pin is active-low. */
   9757 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9758 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9759 		    CTRL_EXT_SWDPIN(4));
   9760 		reg |= CTRL_EXT_SWDPIO(4);
   9761 
   9762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9763 		CSR_WRITE_FLUSH(sc);
   9764 		delay(10*1000);
   9765 
   9766 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9767 		CSR_WRITE_FLUSH(sc);
   9768 		delay(150);
   9769 #if 0
   9770 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9771 #endif
   9772 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9773 		break;
   9774 	case WM_T_82544:	/* Reset 10000us */
   9775 	case WM_T_82540:
   9776 	case WM_T_82545:
   9777 	case WM_T_82545_3:
   9778 	case WM_T_82546:
   9779 	case WM_T_82546_3:
   9780 	case WM_T_82541:
   9781 	case WM_T_82541_2:
   9782 	case WM_T_82547:
   9783 	case WM_T_82547_2:
   9784 	case WM_T_82571:	/* Reset 100us */
   9785 	case WM_T_82572:
   9786 	case WM_T_82573:
   9787 	case WM_T_82574:
   9788 	case WM_T_82575:
   9789 	case WM_T_82576:
   9790 	case WM_T_82580:
   9791 	case WM_T_I350:
   9792 	case WM_T_I354:
   9793 	case WM_T_I210:
   9794 	case WM_T_I211:
   9795 	case WM_T_82583:
   9796 	case WM_T_80003:
   9797 		/* Generic reset */
   9798 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9799 		CSR_WRITE_FLUSH(sc);
   9800 		delay(20000);
   9801 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9802 		CSR_WRITE_FLUSH(sc);
   9803 		delay(20000);
   9804 
   9805 		if ((sc->sc_type == WM_T_82541)
   9806 		    || (sc->sc_type == WM_T_82541_2)
   9807 		    || (sc->sc_type == WM_T_82547)
   9808 		    || (sc->sc_type == WM_T_82547_2)) {
    9809 			/* Workarounds for IGP are done in igp_reset() */
   9810 			/* XXX add code to set LED after phy reset */
   9811 		}
   9812 		break;
   9813 	case WM_T_ICH8:
   9814 	case WM_T_ICH9:
   9815 	case WM_T_ICH10:
   9816 	case WM_T_PCH:
   9817 	case WM_T_PCH2:
   9818 	case WM_T_PCH_LPT:
   9819 	case WM_T_PCH_SPT:
   9820 	case WM_T_PCH_CNP:
   9821 		/* Generic reset */
   9822 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9823 		CSR_WRITE_FLUSH(sc);
   9824 		delay(100);
   9825 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9826 		CSR_WRITE_FLUSH(sc);
   9827 		delay(150);
   9828 		break;
   9829 	default:
   9830 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9831 		    __func__);
   9832 		break;
   9833 	}
   9834 
   9835 	sc->phy.release(sc);
   9836 
   9837 	/* get_cfg_done */
   9838 	wm_get_cfg_done(sc);
   9839 
   9840 	/* Extra setup */
   9841 	switch (sc->sc_type) {
   9842 	case WM_T_82542_2_0:
   9843 	case WM_T_82542_2_1:
   9844 	case WM_T_82543:
   9845 	case WM_T_82544:
   9846 	case WM_T_82540:
   9847 	case WM_T_82545:
   9848 	case WM_T_82545_3:
   9849 	case WM_T_82546:
   9850 	case WM_T_82546_3:
   9851 	case WM_T_82541_2:
   9852 	case WM_T_82547_2:
   9853 	case WM_T_82571:
   9854 	case WM_T_82572:
   9855 	case WM_T_82573:
   9856 	case WM_T_82574:
   9857 	case WM_T_82583:
   9858 	case WM_T_82575:
   9859 	case WM_T_82576:
   9860 	case WM_T_82580:
   9861 	case WM_T_I350:
   9862 	case WM_T_I354:
   9863 	case WM_T_I210:
   9864 	case WM_T_I211:
   9865 	case WM_T_80003:
   9866 		/* Null */
   9867 		break;
   9868 	case WM_T_82541:
   9869 	case WM_T_82547:
   9870 		/* XXX Configure actively LED after PHY reset */
   9871 		break;
   9872 	case WM_T_ICH8:
   9873 	case WM_T_ICH9:
   9874 	case WM_T_ICH10:
   9875 	case WM_T_PCH:
   9876 	case WM_T_PCH2:
   9877 	case WM_T_PCH_LPT:
   9878 	case WM_T_PCH_SPT:
   9879 	case WM_T_PCH_CNP:
   9880 		wm_phy_post_reset(sc);
   9881 		break;
   9882 	default:
   9883 		panic("%s: unknown type\n", __func__);
   9884 		break;
   9885 	}
   9886 }
   9887 
   9888 /*
   9889  * Setup sc_phytype and mii_{read|write}reg.
   9890  *
    9891  *  To identify the PHY type, the correct read/write function must be
    9892  * selected, and to select the correct read/write function, the PCI ID
    9893  * or MAC type is needed without accessing any PHY register.
    9894  *
    9895  *  On the first call of this function, the PHY ID is not known yet, so
    9896  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9897  * so the result might be incorrect.
    9898  *
    9899  *  On the second call, the PHY OUI and model are used to identify the
    9900  * PHY type. It might still not be perfect because some entries are
    9901  * missing, but it should be better than the first call.
    9902  *
    9903  *  If the newly detected result differs from the previous assumption,
    9904  * a diagnostic message is printed.
   9905  */
   9906 static void
   9907 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9908     uint16_t phy_model)
   9909 {
   9910 	device_t dev = sc->sc_dev;
   9911 	struct mii_data *mii = &sc->sc_mii;
   9912 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9913 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9914 	mii_readreg_t new_readreg;
   9915 	mii_writereg_t new_writereg;
   9916 
   9917 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9918 		device_xname(sc->sc_dev), __func__));
   9919 
   9920 	if (mii->mii_readreg == NULL) {
   9921 		/*
   9922 		 *  This is the first call of this function. For ICH and PCH
   9923 		 * variants, it's difficult to determine the PHY access method
   9924 		 * by sc_type, so use the PCI product ID for some devices.
   9925 		 */
   9926 
   9927 		switch (sc->sc_pcidevid) {
   9928 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9929 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9930 			/* 82577 */
   9931 			new_phytype = WMPHY_82577;
   9932 			break;
   9933 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9934 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9935 			/* 82578 */
   9936 			new_phytype = WMPHY_82578;
   9937 			break;
   9938 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9939 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9940 			/* 82579 */
   9941 			new_phytype = WMPHY_82579;
   9942 			break;
   9943 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9944 		case PCI_PRODUCT_INTEL_82801I_BM:
   9945 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9946 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9947 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9948 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9949 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9950 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9951 			/* ICH8, 9, 10 with 82567 */
   9952 			new_phytype = WMPHY_BM;
   9953 			break;
   9954 		default:
   9955 			break;
   9956 		}
   9957 	} else {
   9958 		/* It's not the first call. Use PHY OUI and model */
   9959 		switch (phy_oui) {
   9960 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9961 			switch (phy_model) {
   9962 			case 0x0004: /* XXX */
   9963 				new_phytype = WMPHY_82578;
   9964 				break;
   9965 			default:
   9966 				break;
   9967 			}
   9968 			break;
   9969 		case MII_OUI_xxMARVELL:
   9970 			switch (phy_model) {
   9971 			case MII_MODEL_xxMARVELL_I210:
   9972 				new_phytype = WMPHY_I210;
   9973 				break;
   9974 			case MII_MODEL_xxMARVELL_E1011:
   9975 			case MII_MODEL_xxMARVELL_E1000_3:
   9976 			case MII_MODEL_xxMARVELL_E1000_5:
   9977 			case MII_MODEL_xxMARVELL_E1112:
   9978 				new_phytype = WMPHY_M88;
   9979 				break;
   9980 			case MII_MODEL_xxMARVELL_E1149:
   9981 				new_phytype = WMPHY_BM;
   9982 				break;
   9983 			case MII_MODEL_xxMARVELL_E1111:
   9984 			case MII_MODEL_xxMARVELL_I347:
   9985 			case MII_MODEL_xxMARVELL_E1512:
   9986 			case MII_MODEL_xxMARVELL_E1340M:
   9987 			case MII_MODEL_xxMARVELL_E1543:
   9988 				new_phytype = WMPHY_M88;
   9989 				break;
   9990 			case MII_MODEL_xxMARVELL_I82563:
   9991 				new_phytype = WMPHY_GG82563;
   9992 				break;
   9993 			default:
   9994 				break;
   9995 			}
   9996 			break;
   9997 		case MII_OUI_INTEL:
   9998 			switch (phy_model) {
   9999 			case MII_MODEL_INTEL_I82577:
   10000 				new_phytype = WMPHY_82577;
   10001 				break;
   10002 			case MII_MODEL_INTEL_I82579:
   10003 				new_phytype = WMPHY_82579;
   10004 				break;
   10005 			case MII_MODEL_INTEL_I217:
   10006 				new_phytype = WMPHY_I217;
   10007 				break;
   10008 			case MII_MODEL_INTEL_I82580:
   10009 			case MII_MODEL_INTEL_I350:
   10010 				new_phytype = WMPHY_82580;
   10011 				break;
   10012 			default:
   10013 				break;
   10014 			}
   10015 			break;
   10016 		case MII_OUI_yyINTEL:
   10017 			switch (phy_model) {
   10018 			case MII_MODEL_yyINTEL_I82562G:
   10019 			case MII_MODEL_yyINTEL_I82562EM:
   10020 			case MII_MODEL_yyINTEL_I82562ET:
   10021 				new_phytype = WMPHY_IFE;
   10022 				break;
   10023 			case MII_MODEL_yyINTEL_IGP01E1000:
   10024 				new_phytype = WMPHY_IGP;
   10025 				break;
   10026 			case MII_MODEL_yyINTEL_I82566:
   10027 				new_phytype = WMPHY_IGP_3;
   10028 				break;
   10029 			default:
   10030 				break;
   10031 			}
   10032 			break;
   10033 		default:
   10034 			break;
   10035 		}
   10036 		if (new_phytype == WMPHY_UNKNOWN)
   10037 			aprint_verbose_dev(dev,
   10038 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10039 			    __func__, phy_oui, phy_model);
   10040 
   10041 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10042 		    && (sc->sc_phytype != new_phytype)) {
    10043 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10044 			    "was incorrect. PHY type from PHY ID = %u\n",
   10045 			    sc->sc_phytype, new_phytype);
   10046 		}
   10047 	}
   10048 
   10049 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10050 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10051 		/* SGMII */
   10052 		new_readreg = wm_sgmii_readreg;
   10053 		new_writereg = wm_sgmii_writereg;
   10054 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10055 		/* BM2 (phyaddr == 1) */
   10056 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10057 		    && (new_phytype != WMPHY_BM)
   10058 		    && (new_phytype != WMPHY_UNKNOWN))
   10059 			doubt_phytype = new_phytype;
   10060 		new_phytype = WMPHY_BM;
   10061 		new_readreg = wm_gmii_bm_readreg;
   10062 		new_writereg = wm_gmii_bm_writereg;
   10063 	} else if (sc->sc_type >= WM_T_PCH) {
   10064 		/* All PCH* use _hv_ */
   10065 		new_readreg = wm_gmii_hv_readreg;
   10066 		new_writereg = wm_gmii_hv_writereg;
   10067 	} else if (sc->sc_type >= WM_T_ICH8) {
   10068 		/* non-82567 ICH8, 9 and 10 */
   10069 		new_readreg = wm_gmii_i82544_readreg;
   10070 		new_writereg = wm_gmii_i82544_writereg;
   10071 	} else if (sc->sc_type >= WM_T_80003) {
   10072 		/* 80003 */
   10073 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10074 		    && (new_phytype != WMPHY_GG82563)
   10075 		    && (new_phytype != WMPHY_UNKNOWN))
   10076 			doubt_phytype = new_phytype;
   10077 		new_phytype = WMPHY_GG82563;
   10078 		new_readreg = wm_gmii_i80003_readreg;
   10079 		new_writereg = wm_gmii_i80003_writereg;
   10080 	} else if (sc->sc_type >= WM_T_I210) {
   10081 		/* I210 and I211 */
   10082 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10083 		    && (new_phytype != WMPHY_I210)
   10084 		    && (new_phytype != WMPHY_UNKNOWN))
   10085 			doubt_phytype = new_phytype;
   10086 		new_phytype = WMPHY_I210;
   10087 		new_readreg = wm_gmii_gs40g_readreg;
   10088 		new_writereg = wm_gmii_gs40g_writereg;
   10089 	} else if (sc->sc_type >= WM_T_82580) {
   10090 		/* 82580, I350 and I354 */
   10091 		new_readreg = wm_gmii_82580_readreg;
   10092 		new_writereg = wm_gmii_82580_writereg;
   10093 	} else if (sc->sc_type >= WM_T_82544) {
    10094 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10095 		new_readreg = wm_gmii_i82544_readreg;
   10096 		new_writereg = wm_gmii_i82544_writereg;
   10097 	} else {
   10098 		new_readreg = wm_gmii_i82543_readreg;
   10099 		new_writereg = wm_gmii_i82543_writereg;
   10100 	}
   10101 
   10102 	if (new_phytype == WMPHY_BM) {
   10103 		/* All BM use _bm_ */
   10104 		new_readreg = wm_gmii_bm_readreg;
   10105 		new_writereg = wm_gmii_bm_writereg;
   10106 	}
   10107 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10108 		/* All PCH* use _hv_ */
   10109 		new_readreg = wm_gmii_hv_readreg;
   10110 		new_writereg = wm_gmii_hv_writereg;
   10111 	}
   10112 
   10113 	/* Diag output */
   10114 	if (doubt_phytype != WMPHY_UNKNOWN)
   10115 		aprint_error_dev(dev, "Assumed new PHY type was "
   10116 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10117 		    new_phytype);
   10118 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10119 	    && (sc->sc_phytype != new_phytype))
    10120 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    10121 		    "was incorrect. New PHY type = %u\n",
   10122 		    sc->sc_phytype, new_phytype);
   10123 
   10124 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10125 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10126 
   10127 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10128 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10129 		    "function was incorrect.\n");
   10130 
   10131 	/* Update now */
   10132 	sc->sc_phytype = new_phytype;
   10133 	mii->mii_readreg = new_readreg;
   10134 	mii->mii_writereg = new_writereg;
   10135 	if (new_readreg == wm_gmii_hv_readreg) {
   10136 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10137 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10138 	} else if (new_readreg == wm_sgmii_readreg) {
   10139 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10140 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10141 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10142 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10143 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10144 	}
   10145 }
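
/*
 * Note: the first call of wm_gmii_setup_phytype() is made while
 * mii->mii_readreg is still NULL and relies on the PCI ID and MAC type
 * alone; the second call is made from wm_gmii_mediainit() with the
 * probed child's mii_mpd_oui and mii_mpd_model once a PHY is found.
 */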
   10146 
   10147 /*
   10148  * wm_get_phy_id_82575:
   10149  *
    10150  * Return the PHY ID, or -1 on failure.
   10151  */
   10152 static int
   10153 wm_get_phy_id_82575(struct wm_softc *sc)
   10154 {
   10155 	uint32_t reg;
   10156 	int phyid = -1;
   10157 
   10158 	/* XXX */
   10159 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10160 		return -1;
   10161 
   10162 	if (wm_sgmii_uses_mdio(sc)) {
   10163 		switch (sc->sc_type) {
   10164 		case WM_T_82575:
   10165 		case WM_T_82576:
   10166 			reg = CSR_READ(sc, WMREG_MDIC);
   10167 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10168 			break;
   10169 		case WM_T_82580:
   10170 		case WM_T_I350:
   10171 		case WM_T_I354:
   10172 		case WM_T_I210:
   10173 		case WM_T_I211:
   10174 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10175 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10176 			break;
   10177 		default:
   10178 			return -1;
   10179 		}
   10180 	}
   10181 
   10182 	return phyid;
   10183 }
    10184 
   10186 /*
   10187  * wm_gmii_mediainit:
   10188  *
   10189  *	Initialize media for use on 1000BASE-T devices.
   10190  */
   10191 static void
   10192 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10193 {
   10194 	device_t dev = sc->sc_dev;
   10195 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10196 	struct mii_data *mii = &sc->sc_mii;
   10197 	uint32_t reg;
   10198 
   10199 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10200 		device_xname(sc->sc_dev), __func__));
   10201 
   10202 	/* We have GMII. */
   10203 	sc->sc_flags |= WM_F_HAS_MII;
   10204 
   10205 	if (sc->sc_type == WM_T_80003)
    10206 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10207 	else
   10208 		sc->sc_tipg = TIPG_1000T_DFLT;
   10209 
   10210 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10211 	if ((sc->sc_type == WM_T_82580)
   10212 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10213 	    || (sc->sc_type == WM_T_I211)) {
   10214 		reg = CSR_READ(sc, WMREG_PHPM);
   10215 		reg &= ~PHPM_GO_LINK_D;
   10216 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10217 	}
   10218 
   10219 	/*
   10220 	 * Let the chip set speed/duplex on its own based on
   10221 	 * signals from the PHY.
   10222 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10223 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10224 	 */
   10225 	sc->sc_ctrl |= CTRL_SLU;
   10226 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10227 
   10228 	/* Initialize our media structures and probe the GMII. */
   10229 	mii->mii_ifp = ifp;
   10230 
   10231 	mii->mii_statchg = wm_gmii_statchg;
   10232 
    10233 	/* Get PHY control from SMBus to PCIe */
   10234 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10235 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10236 	    || (sc->sc_type == WM_T_PCH_CNP))
   10237 		wm_init_phy_workarounds_pchlan(sc);
   10238 
   10239 	wm_gmii_reset(sc);
   10240 
   10241 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10242 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10243 	    wm_gmii_mediastatus);
   10244 
   10245 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10246 	    || (sc->sc_type == WM_T_82580)
   10247 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10248 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10249 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10250 			/* Attach only one port */
   10251 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10252 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10253 		} else {
   10254 			int i, id;
   10255 			uint32_t ctrl_ext;
   10256 
   10257 			id = wm_get_phy_id_82575(sc);
   10258 			if (id != -1) {
   10259 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10260 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10261 			}
   10262 			if ((id == -1)
   10263 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10264 				/* Power on the SGMII PHY if it is disabled */
    10265 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
    10266 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10267 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10268 				CSR_WRITE_FLUSH(sc);
   10269 				delay(300*1000); /* XXX too long */
   10270 
    10271 				/* PHY addresses from 1 to 7 */
   10272 				for (i = 1; i < 8; i++)
   10273 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10274 					    0xffffffff, i, MII_OFFSET_ANY,
   10275 					    MIIF_DOPAUSE);
   10276 
   10277 				/* Restore previous sfp cage power state */
   10278 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10279 			}
   10280 		}
   10281 	} else
   10282 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10283 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10284 
   10285 	/*
   10286 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10287 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10288 	 */
   10289 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10290 		|| (sc->sc_type == WM_T_PCH_SPT)
   10291 		|| (sc->sc_type == WM_T_PCH_CNP))
   10292 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10293 		wm_set_mdio_slow_mode_hv(sc);
   10294 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10295 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10296 	}
   10297 
   10298 	/*
   10299 	 * (For ICH8 variants)
   10300 	 * If PHY detection failed, use BM's r/w function and retry.
   10301 	 */
   10302 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10303 		/* if failed, retry with *_bm_* */
   10304 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10305 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10306 		    sc->sc_phytype);
   10307 		sc->sc_phytype = WMPHY_BM;
   10308 		mii->mii_readreg = wm_gmii_bm_readreg;
   10309 		mii->mii_writereg = wm_gmii_bm_writereg;
   10310 
   10311 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10312 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10313 	}
   10314 
   10315 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10316 		/* No PHY was found */
   10317 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10318 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10319 		sc->sc_phytype = WMPHY_NONE;
   10320 	} else {
   10321 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10322 
   10323 		/*
   10324 		 * PHY Found! Check PHY type again by the second call of
   10325 		 * wm_gmii_setup_phytype.
   10326 		 */
   10327 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10328 		    child->mii_mpd_model);
   10329 
   10330 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10331 	}
   10332 }
   10333 
   10334 /*
   10335  * wm_gmii_mediachange:	[ifmedia interface function]
   10336  *
   10337  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10338  */
   10339 static int
   10340 wm_gmii_mediachange(struct ifnet *ifp)
   10341 {
   10342 	struct wm_softc *sc = ifp->if_softc;
   10343 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10344 	int rc;
   10345 
   10346 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10347 		device_xname(sc->sc_dev), __func__));
   10348 	if ((ifp->if_flags & IFF_UP) == 0)
   10349 		return 0;
   10350 
   10351 	/* Disable D0 LPLU. */
   10352 	wm_lplu_d0_disable(sc);
   10353 
   10354 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10355 	sc->sc_ctrl |= CTRL_SLU;
   10356 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10357 	    || (sc->sc_type > WM_T_82543)) {
   10358 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10359 	} else {
   10360 		sc->sc_ctrl &= ~CTRL_ASDE;
   10361 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10362 		if (ife->ifm_media & IFM_FDX)
   10363 			sc->sc_ctrl |= CTRL_FD;
   10364 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10365 		case IFM_10_T:
   10366 			sc->sc_ctrl |= CTRL_SPEED_10;
   10367 			break;
   10368 		case IFM_100_TX:
   10369 			sc->sc_ctrl |= CTRL_SPEED_100;
   10370 			break;
   10371 		case IFM_1000_T:
   10372 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10373 			break;
   10374 		case IFM_NONE:
   10375 			/* There is no specific setting for IFM_NONE */
   10376 			break;
   10377 		default:
   10378 			panic("wm_gmii_mediachange: bad media 0x%x",
   10379 			    ife->ifm_media);
   10380 		}
   10381 	}
   10382 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10383 	CSR_WRITE_FLUSH(sc);
   10384 	if (sc->sc_type <= WM_T_82543)
   10385 		wm_gmii_reset(sc);
   10386 
   10387 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10388 		return 0;
   10389 	return rc;
   10390 }
   10391 
   10392 /*
   10393  * wm_gmii_mediastatus:	[ifmedia interface function]
   10394  *
   10395  *	Get the current interface media status on a 1000BASE-T device.
   10396  */
   10397 static void
   10398 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10399 {
   10400 	struct wm_softc *sc = ifp->if_softc;
   10401 
   10402 	ether_mediastatus(ifp, ifmr);
   10403 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10404 	    | sc->sc_flowflags;
   10405 }
   10406 
   10407 #define	MDI_IO		CTRL_SWDPIN(2)
   10408 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10409 #define	MDI_CLK		CTRL_SWDPIN(3)
   10410 
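/*
 * The i82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins above: MDI_IO carries
 * the data and MDI_CLK the clock. An IEEE 802.3 clause 22 read frame is
 * a 32-bit preamble of ones, start bits (01), the read opcode (10), a
 * 5-bit PHY address, a 5-bit register address, a turnaround cycle, and
 * 16 data bits; wm_gmii_i82543_readreg() below assembles it this way.
 */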
   10411 static void
   10412 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10413 {
   10414 	uint32_t i, v;
   10415 
   10416 	v = CSR_READ(sc, WMREG_CTRL);
   10417 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10418 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10419 
   10420 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10421 		if (data & i)
   10422 			v |= MDI_IO;
   10423 		else
   10424 			v &= ~MDI_IO;
   10425 		CSR_WRITE(sc, WMREG_CTRL, v);
   10426 		CSR_WRITE_FLUSH(sc);
   10427 		delay(10);
   10428 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10429 		CSR_WRITE_FLUSH(sc);
   10430 		delay(10);
   10431 		CSR_WRITE(sc, WMREG_CTRL, v);
   10432 		CSR_WRITE_FLUSH(sc);
   10433 		delay(10);
   10434 	}
   10435 }
   10436 
   10437 static uint16_t
   10438 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10439 {
   10440 	uint32_t v, i;
   10441 	uint16_t data = 0;
   10442 
   10443 	v = CSR_READ(sc, WMREG_CTRL);
   10444 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10445 	v |= CTRL_SWDPIO(3);
   10446 
   10447 	CSR_WRITE(sc, WMREG_CTRL, v);
   10448 	CSR_WRITE_FLUSH(sc);
   10449 	delay(10);
   10450 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10451 	CSR_WRITE_FLUSH(sc);
   10452 	delay(10);
   10453 	CSR_WRITE(sc, WMREG_CTRL, v);
   10454 	CSR_WRITE_FLUSH(sc);
   10455 	delay(10);
   10456 
   10457 	for (i = 0; i < 16; i++) {
   10458 		data <<= 1;
   10459 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10460 		CSR_WRITE_FLUSH(sc);
   10461 		delay(10);
   10462 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10463 			data |= 1;
   10464 		CSR_WRITE(sc, WMREG_CTRL, v);
   10465 		CSR_WRITE_FLUSH(sc);
   10466 		delay(10);
   10467 	}
   10468 
   10469 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10470 	CSR_WRITE_FLUSH(sc);
   10471 	delay(10);
   10472 	CSR_WRITE(sc, WMREG_CTRL, v);
   10473 	CSR_WRITE_FLUSH(sc);
   10474 	delay(10);
   10475 
   10476 	return data;
   10477 }
   10478 
   10479 #undef MDI_IO
   10480 #undef MDI_DIR
   10481 #undef MDI_CLK
   10482 
   10483 /*
   10484  * wm_gmii_i82543_readreg:	[mii interface function]
   10485  *
   10486  *	Read a PHY register on the GMII (i82543 version).
   10487  */
   10488 static int
   10489 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10490 {
   10491 	struct wm_softc *sc = device_private(dev);
   10492 
   10493 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10494 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10495 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10496 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10497 
   10498 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10499 		device_xname(dev), phy, reg, *val));
   10500 
   10501 	return 0;
   10502 }
   10503 
   10504 /*
   10505  * wm_gmii_i82543_writereg:	[mii interface function]
   10506  *
   10507  *	Write a PHY register on the GMII (i82543 version).
   10508  */
   10509 static int
   10510 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10511 {
   10512 	struct wm_softc *sc = device_private(dev);
   10513 
   10514 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10515 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10516 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10517 	    (MII_COMMAND_START << 30), 32);
   10518 
   10519 	return 0;
   10520 }
   10521 
   10522 /*
   10523  * wm_gmii_mdic_readreg:	[mii interface function]
   10524  *
   10525  *	Read a PHY register on the GMII.
   10526  */
   10527 static int
   10528 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10529 {
   10530 	struct wm_softc *sc = device_private(dev);
   10531 	uint32_t mdic = 0;
   10532 	int i;
   10533 
   10534 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10535 	    && (reg > MII_ADDRMASK)) {
   10536 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10537 		    __func__, sc->sc_phytype, reg);
   10538 		reg &= MII_ADDRMASK;
   10539 	}
   10540 
   10541 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10542 	    MDIC_REGADD(reg));
   10543 
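	/* Poll for completion; the ready bit is sampled every 50us. */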
   10544 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10545 		delay(50);
   10546 		mdic = CSR_READ(sc, WMREG_MDIC);
   10547 		if (mdic & MDIC_READY)
   10548 			break;
   10549 	}
   10550 
   10551 	if ((mdic & MDIC_READY) == 0) {
   10552 		DPRINTF(WM_DEBUG_GMII,
   10553 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10554 			device_xname(dev), phy, reg));
   10555 		return ETIMEDOUT;
   10556 	} else if (mdic & MDIC_E) {
   10557 		/* This is normal if no PHY is present. */
   10558 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10559 			device_xname(sc->sc_dev), phy, reg));
   10560 		return -1;
   10561 	} else
   10562 		*val = MDIC_DATA(mdic);
   10563 
   10564 	/*
   10565 	 * Allow some time after each MDIC transaction to avoid
   10566 	 * reading duplicate data in the next MDIC transaction.
   10567 	 */
   10568 	if (sc->sc_type == WM_T_PCH2)
   10569 		delay(100);
   10570 
   10571 	return 0;
   10572 }
   10573 
   10574 /*
   10575  * wm_gmii_mdic_writereg:	[mii interface function]
   10576  *
   10577  *	Write a PHY register on the GMII.
   10578  */
   10579 static int
   10580 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10581 {
   10582 	struct wm_softc *sc = device_private(dev);
   10583 	uint32_t mdic = 0;
   10584 	int i;
   10585 
   10586 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10587 	    && (reg > MII_ADDRMASK)) {
   10588 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10589 		    __func__, sc->sc_phytype, reg);
   10590 		reg &= MII_ADDRMASK;
   10591 	}
   10592 
   10593 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10594 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10595 
   10596 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10597 		delay(50);
   10598 		mdic = CSR_READ(sc, WMREG_MDIC);
   10599 		if (mdic & MDIC_READY)
   10600 			break;
   10601 	}
   10602 
   10603 	if ((mdic & MDIC_READY) == 0) {
   10604 		DPRINTF(WM_DEBUG_GMII,
   10605 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10606 			device_xname(dev), phy, reg));
   10607 		return ETIMEDOUT;
   10608 	} else if (mdic & MDIC_E) {
   10609 		DPRINTF(WM_DEBUG_GMII,
   10610 		    ("%s: MDIC write error: phy %d reg %d\n",
   10611 			device_xname(dev), phy, reg));
   10612 		return -1;
   10613 	}
   10614 
   10615 	/*
   10616 	 * Allow some time after each MDIC transaction to avoid
   10617 	 * reading duplicate data in the next MDIC transaction.
   10618 	 */
   10619 	if (sc->sc_type == WM_T_PCH2)
   10620 		delay(100);
   10621 
   10622 	return 0;
   10623 }
   10624 
   10625 /*
   10626  * wm_gmii_i82544_readreg:	[mii interface function]
   10627  *
   10628  *	Read a PHY register on the GMII.
   10629  */
   10630 static int
   10631 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10632 {
   10633 	struct wm_softc *sc = device_private(dev);
   10634 	int rv;
   10635 
   10636 	if (sc->phy.acquire(sc)) {
   10637 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10638 		return -1;
   10639 	}
   10640 
   10641 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10642 
   10643 	sc->phy.release(sc);
   10644 
   10645 	return rv;
   10646 }
   10647 
   10648 static int
   10649 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10650 {
   10651 	struct wm_softc *sc = device_private(dev);
   10652 	int rv;
   10653 
   10654 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10655 		switch (sc->sc_phytype) {
   10656 		case WMPHY_IGP:
   10657 		case WMPHY_IGP_2:
   10658 		case WMPHY_IGP_3:
   10659 			rv = wm_gmii_mdic_writereg(dev, phy,
   10660 			    MII_IGPHY_PAGE_SELECT, reg);
   10661 			if (rv != 0)
   10662 				return rv;
   10663 			break;
   10664 		default:
   10665 #ifdef WM_DEBUG
   10666 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10667 			    __func__, sc->sc_phytype, reg);
   10668 #endif
   10669 			break;
   10670 		}
   10671 	}
   10672 
   10673 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10674 }
   10675 
   10676 /*
   10677  * wm_gmii_i82544_writereg:	[mii interface function]
   10678  *
   10679  *	Write a PHY register on the GMII.
   10680  */
   10681 static int
   10682 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10683 {
   10684 	struct wm_softc *sc = device_private(dev);
   10685 	int rv;
   10686 
   10687 	if (sc->phy.acquire(sc)) {
   10688 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10689 		return -1;
   10690 	}
   10691 
   10692 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10693 	sc->phy.release(sc);
   10694 
   10695 	return rv;
   10696 }
   10697 
   10698 static int
   10699 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10700 {
   10701 	struct wm_softc *sc = device_private(dev);
   10702 	int rv;
   10703 
   10704 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10705 		switch (sc->sc_phytype) {
   10706 		case WMPHY_IGP:
   10707 		case WMPHY_IGP_2:
   10708 		case WMPHY_IGP_3:
   10709 			rv = wm_gmii_mdic_writereg(dev, phy,
   10710 			    MII_IGPHY_PAGE_SELECT, reg);
   10711 			if (rv != 0)
   10712 				return rv;
   10713 			break;
   10714 		default:
   10715 #ifdef WM_DEBUG
    10716 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10717 			    __func__, sc->sc_phytype, reg);
   10718 #endif
   10719 			break;
   10720 		}
   10721 	}
   10722 
   10723 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10724 }
   10725 
   10726 /*
   10727  * wm_gmii_i80003_readreg:	[mii interface function]
   10728  *
   10729  *	Read a PHY register on the kumeran
    10730  *	Read a PHY register on the Kumeran bus (80003).
    10731  * This could be handled by the PHY layer if we didn't have to lock the
    10732  * resource ...
   10733 static int
   10734 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10735 {
   10736 	struct wm_softc *sc = device_private(dev);
   10737 	int page_select;
   10738 	uint16_t temp, temp2;
   10739 	int rv = 0;
   10740 
   10741 	if (phy != 1) /* Only one PHY on kumeran bus */
   10742 		return -1;
   10743 
   10744 	if (sc->phy.acquire(sc)) {
   10745 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10746 		return -1;
   10747 	}
   10748 
   10749 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10750 		page_select = GG82563_PHY_PAGE_SELECT;
   10751 	else {
   10752 		/*
   10753 		 * Use Alternative Page Select register to access registers
   10754 		 * 30 and 31.
   10755 		 */
   10756 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10757 	}
   10758 	temp = reg >> GG82563_PAGE_SHIFT;
   10759 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10760 		goto out;
   10761 
   10762 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10763 		/*
    10764 		 * Wait another 200us to work around a bug in the ready bit
    10765 		 * of the MDIC register.
   10766 		 */
   10767 		delay(200);
   10768 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10769 		if ((rv != 0) || (temp2 != temp)) {
   10770 			device_printf(dev, "%s failed\n", __func__);
   10771 			rv = -1;
   10772 			goto out;
   10773 		}
   10774 		delay(200);
   10775 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10776 		delay(200);
   10777 	} else
   10778 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10779 
   10780 out:
   10781 	sc->phy.release(sc);
   10782 	return rv;
   10783 }
   10784 
   10785 /*
   10786  * wm_gmii_i80003_writereg:	[mii interface function]
   10787  *
    10788  *	Write a PHY register on the Kumeran bus (80003).
    10789  * This could be handled by the PHY layer if we didn't have to lock the
    10790  * resource ...
   10791  */
   10792 static int
   10793 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10794 {
   10795 	struct wm_softc *sc = device_private(dev);
   10796 	int page_select, rv;
   10797 	uint16_t temp, temp2;
   10798 
   10799 	if (phy != 1) /* Only one PHY on kumeran bus */
   10800 		return -1;
   10801 
   10802 	if (sc->phy.acquire(sc)) {
   10803 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10804 		return -1;
   10805 	}
   10806 
   10807 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10808 		page_select = GG82563_PHY_PAGE_SELECT;
   10809 	else {
   10810 		/*
   10811 		 * Use Alternative Page Select register to access registers
   10812 		 * 30 and 31.
   10813 		 */
   10814 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10815 	}
   10816 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10817 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10818 		goto out;
   10819 
   10820 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10821 		/*
    10822 		 * Wait another 200us to work around a bug in the ready bit
    10823 		 * of the MDIC register.
   10824 		 */
   10825 		delay(200);
   10826 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10827 		if ((rv != 0) || (temp2 != temp)) {
   10828 			device_printf(dev, "%s failed\n", __func__);
   10829 			rv = -1;
   10830 			goto out;
   10831 		}
   10832 		delay(200);
   10833 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10834 		delay(200);
   10835 	} else
   10836 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10837 
   10838 out:
   10839 	sc->phy.release(sc);
   10840 	return rv;
   10841 }
   10842 
   10843 /*
   10844  * wm_gmii_bm_readreg:	[mii interface function]
   10845  *
    10846  *	Read a PHY register on the BM PHYs (82574/82583, ICH/PCH with 82567).
    10847  * This could be handled by the PHY layer if we didn't have to lock the
    10848  * resource ...
   10849  */
   10850 static int
   10851 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10852 {
   10853 	struct wm_softc *sc = device_private(dev);
   10854 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10855 	int rv;
   10856 
   10857 	if (sc->phy.acquire(sc)) {
   10858 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10859 		return -1;
   10860 	}
   10861 
   10862 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10863 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10864 		    || (reg == 31)) ? 1 : phy;
   10865 	/* Page 800 works differently than the rest so it has its own func */
   10866 	if (page == BM_WUC_PAGE) {
   10867 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10868 		goto release;
   10869 	}
   10870 
   10871 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10872 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10873 		    && (sc->sc_type != WM_T_82583))
   10874 			rv = wm_gmii_mdic_writereg(dev, phy,
   10875 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10876 		else
   10877 			rv = wm_gmii_mdic_writereg(dev, phy,
   10878 			    BME1000_PHY_PAGE_SELECT, page);
   10879 		if (rv != 0)
   10880 			goto release;
   10881 	}
   10882 
   10883 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10884 
   10885 release:
   10886 	sc->phy.release(sc);
   10887 	return rv;
   10888 }
   10889 
   10890 /*
   10891  * wm_gmii_bm_writereg:	[mii interface function]
   10892  *
    10893  *	Write a PHY register on the BM PHYs (82574/82583, ICH/PCH with 82567).
    10894  * This could be handled by the PHY layer if we didn't have to lock the
    10895  * resource ...
   10896  */
   10897 static int
   10898 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10899 {
   10900 	struct wm_softc *sc = device_private(dev);
   10901 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10902 	int rv;
   10903 
   10904 	if (sc->phy.acquire(sc)) {
   10905 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10906 		return -1;
   10907 	}
   10908 
   10909 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10910 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10911 		    || (reg == 31)) ? 1 : phy;
   10912 	/* Page 800 works differently than the rest so it has its own func */
   10913 	if (page == BM_WUC_PAGE) {
   10914 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10915 		goto release;
   10916 	}
   10917 
   10918 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10919 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10920 		    && (sc->sc_type != WM_T_82583))
   10921 			rv = wm_gmii_mdic_writereg(dev, phy,
   10922 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10923 		else
   10924 			rv = wm_gmii_mdic_writereg(dev, phy,
   10925 			    BME1000_PHY_PAGE_SELECT, page);
   10926 		if (rv != 0)
   10927 			goto release;
   10928 	}
   10929 
   10930 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10931 
   10932 release:
   10933 	sc->phy.release(sc);
   10934 	return rv;
   10935 }
   10936 
   10937 /*
   10938  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10939  *  @dev: pointer to the HW structure
   10940  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
    10941  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   10942  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10943  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10944  */
   10945 static int
   10946 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10947 {
   10948 	uint16_t temp;
   10949 	int rv;
   10950 
   10951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10952 		device_xname(dev), __func__));
   10953 
   10954 	if (!phy_regp)
   10955 		return -1;
   10956 
   10957 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10958 
   10959 	/* Select Port Control Registers page */
   10960 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10961 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10962 	if (rv != 0)
   10963 		return rv;
   10964 
   10965 	/* Read WUCE and save it */
   10966 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10967 	if (rv != 0)
   10968 		return rv;
   10969 
   10970 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10971 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10972 	 */
   10973 	temp = *phy_regp;
   10974 	temp |= BM_WUC_ENABLE_BIT;
   10975 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10976 
   10977 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10978 		return rv;
   10979 
   10980 	/* Select Host Wakeup Registers page - caller now able to write
   10981 	 * registers on the Wakeup registers page
   10982 	 */
   10983 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10984 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10985 }
   10986 
   10987 /*
   10988  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10989  *  @dev: pointer to the HW structure
    10990  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   10991  *
   10992  *  Restore BM_WUC_ENABLE_REG to its original value.
   10993  *
    10994  *  Assumes semaphore already acquired and *phy_regp is the contents of the
   10995  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10996  *  caller.
   10997  */
   10998 static int
   10999 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11000 {
   11001 
   11002 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11003 		device_xname(dev), __func__));
   11004 
   11005 	if (!phy_regp)
   11006 		return -1;
   11007 
   11008 	/* Select Port Control Registers page */
   11009 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11010 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11011 
   11012 	/* Restore 769.17 to its original value */
   11013 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11014 
   11015 	return 0;
   11016 }
   11017 
   11018 /*
   11019  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11020  *  @dev: pointer to the device structure
   11021  *  @offset: register offset to be read or written
   11022  *  @val: pointer to the data to read or write
   11023  *  @rd: determines if operation is read or write
   11024  *  @page_set: BM_WUC_PAGE already set and access enabled
   11025  *
   11026  *  Read the PHY register at offset and store the retrieved information in
   11027  *  data, or write data to PHY register at offset.  Note the procedure to
   11028  *  access the PHY wakeup registers is different than reading the other PHY
   11029  *  registers. It works as such:
   11030  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11031  *  2) Set page to 800 for host (801 if we were the manageability engine)
   11032  *  3) Write the address using the address opcode (0x11)
   11033  *  4) Read or write the data using the data opcode (0x12)
   11034  *  5) Restore 769.17.2 to its original value
   11035  *
   11036  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11037  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11038  *
   11039  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11040  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11041  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11042  */
   11043 static int
    11044 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11045 	bool page_set)
   11046 {
   11047 	struct wm_softc *sc = device_private(dev);
   11048 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11049 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11050 	uint16_t wuce;
   11051 	int rv = 0;
   11052 
   11053 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11054 		device_xname(dev), __func__));
   11055 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11056 	if ((sc->sc_type == WM_T_PCH)
   11057 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11058 		device_printf(dev,
   11059 		    "Attempting to access page %d while gig enabled.\n", page);
   11060 	}
   11061 
   11062 	if (!page_set) {
   11063 		/* Enable access to PHY wakeup registers */
   11064 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11065 		if (rv != 0) {
   11066 			device_printf(dev,
   11067 			    "%s: Could not enable PHY wakeup reg access\n",
   11068 			    __func__);
   11069 			return rv;
   11070 		}
   11071 	}
   11072 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11073 		device_xname(sc->sc_dev), __func__, page, regnum));
   11074 
   11075 	/*
    11076 	 * Steps 3) and 4): access the PHY wakeup register.
    11077 	 * See the procedure in the function comment above.
   11078 	 */
   11079 
   11080 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11081 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11082 	if (rv != 0)
   11083 		return rv;
   11084 
   11085 	if (rd) {
   11086 		/* Read the Wakeup register page value using opcode 0x12 */
   11087 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11088 	} else {
   11089 		/* Write the Wakeup register page value using opcode 0x12 */
   11090 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11091 	}
   11092 	if (rv != 0)
   11093 		return rv;
   11094 
   11095 	if (!page_set)
   11096 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11097 
   11098 	return rv;
   11099 }
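
/*
 * Illustrative sketch (editor's example, not called anywhere): reading
 * wakeup register 1 on page 800 through the BM path, assuming
 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() decode the same page encoding as
 * BME1000_PAGE_SHIFT used above:
 *
 *	uint16_t val;
 *	int rv = wm_gmii_bm_readreg(sc->sc_dev, 1,
 *	    (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | 1, &val);
 *
 * wm_gmii_bm_readreg() acquires the PHY semaphore and, because the page
 * is BM_WUC_PAGE, routes the access through this function with
 * page_set == false, so the enable/disable steps happen internally.
 */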
   11100 
   11101 /*
   11102  * wm_gmii_hv_readreg:	[mii interface function]
   11103  *
    11104  *	Read a PHY register on the HV (PCH and newer) PHYs.
    11105  * This could be handled by the PHY layer if we didn't have to lock the
    11106  * resource ...
   11107  */
   11108 static int
   11109 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11110 {
   11111 	struct wm_softc *sc = device_private(dev);
   11112 	int rv;
   11113 
   11114 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11115 		device_xname(dev), __func__));
   11116 	if (sc->phy.acquire(sc)) {
   11117 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11118 		return -1;
   11119 	}
   11120 
   11121 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11122 	sc->phy.release(sc);
   11123 	return rv;
   11124 }
   11125 
   11126 static int
   11127 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11128 {
   11129 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11130 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11131 	int rv;
   11132 
   11133 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11134 
   11135 	/* Page 800 works differently than the rest so it has its own func */
   11136 	if (page == BM_WUC_PAGE)
   11137 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11138 
   11139 	/*
    11140 	 * Pages 1 through 767 work differently than the rest and are not
    11141 	 * handled here.
    11142 	 */
    11143 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11144 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11145 		return -1;
   11146 	}
   11147 
   11148 	/*
   11149 	 * XXX I21[789] documents say that the SMBus Address register is at
   11150 	 * PHY address 01, Page 0 (not 768), Register 26.
   11151 	 */
   11152 	if (page == HV_INTC_FC_PAGE_START)
   11153 		page = 0;
   11154 
   11155 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11156 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11157 		    page << BME1000_PAGE_SHIFT);
   11158 		if (rv != 0)
   11159 			return rv;
   11160 	}
   11161 
   11162 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11163 }
   11164 
   11165 /*
   11166  * wm_gmii_hv_writereg:	[mii interface function]
   11167  *
    11168  *	Write a PHY register on the HV (PCH and newer) PHY.
    11169  * This could be handled by the PHY layer if we didn't have to lock the
    11170  * resource ...
   11171  */
   11172 static int
   11173 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11174 {
   11175 	struct wm_softc *sc = device_private(dev);
   11176 	int rv;
   11177 
   11178 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11179 		device_xname(dev), __func__));
   11180 
   11181 	if (sc->phy.acquire(sc)) {
   11182 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11183 		return -1;
   11184 	}
   11185 
   11186 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11187 	sc->phy.release(sc);
   11188 
   11189 	return rv;
   11190 }
   11191 
   11192 static int
   11193 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11194 {
   11195 	struct wm_softc *sc = device_private(dev);
   11196 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11197 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11198 	int rv;
   11199 
   11200 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11201 
   11202 	/* Page 800 works differently than the rest so it has its own func */
   11203 	if (page == BM_WUC_PAGE)
   11204 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11205 		    false);
   11206 
   11207 	/*
    11208 	 * Pages 1 through 767 work differently than the rest and are not
    11209 	 * handled here.
    11210 	 */
    11211 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11212 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11213 		return -1;
   11214 	}
   11215 
   11216 	{
   11217 		/*
   11218 		 * XXX I21[789] documents say that the SMBus Address register
   11219 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11220 		 */
   11221 		if (page == HV_INTC_FC_PAGE_START)
   11222 			page = 0;
   11223 
   11224 		/*
   11225 		 * XXX Workaround MDIO accesses being disabled after entering
   11226 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11227 		 * register is set)
   11228 		 */
   11229 		if (sc->sc_phytype == WMPHY_82578) {
   11230 			struct mii_softc *child;
   11231 
   11232 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11233 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11234 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11235 			    && ((val & (1 << 11)) != 0)) {
   11236 				device_printf(dev, "XXX need workaround\n");
   11237 			}
   11238 		}
   11239 
   11240 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11241 			rv = wm_gmii_mdic_writereg(dev, 1,
   11242 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11243 			if (rv != 0)
   11244 				return rv;
   11245 		}
   11246 	}
   11247 
   11248 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11249 }
   11250 
   11251 /*
   11252  * wm_gmii_82580_readreg:	[mii interface function]
   11253  *
   11254  *	Read a PHY register on the 82580 and I350.
   11255  * This could be handled by the PHY layer if we didn't have to lock the
    11256  * resource ...
   11257  */
   11258 static int
   11259 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11260 {
   11261 	struct wm_softc *sc = device_private(dev);
   11262 	int rv;
   11263 
   11264 	if (sc->phy.acquire(sc) != 0) {
   11265 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11266 		return -1;
   11267 	}
   11268 
   11269 #ifdef DIAGNOSTIC
   11270 	if (reg > MII_ADDRMASK) {
   11271 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11272 		    __func__, sc->sc_phytype, reg);
   11273 		reg &= MII_ADDRMASK;
   11274 	}
   11275 #endif
   11276 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11277 
   11278 	sc->phy.release(sc);
   11279 	return rv;
   11280 }
   11281 
   11282 /*
   11283  * wm_gmii_82580_writereg:	[mii interface function]
   11284  *
   11285  *	Write a PHY register on the 82580 and I350.
   11286  * This could be handled by the PHY layer if we didn't have to lock the
    11287  * resource ...
   11288  */
   11289 static int
   11290 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11291 {
   11292 	struct wm_softc *sc = device_private(dev);
   11293 	int rv;
   11294 
   11295 	if (sc->phy.acquire(sc) != 0) {
   11296 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11297 		return -1;
   11298 	}
   11299 
   11300 #ifdef DIAGNOSTIC
   11301 	if (reg > MII_ADDRMASK) {
   11302 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11303 		    __func__, sc->sc_phytype, reg);
   11304 		reg &= MII_ADDRMASK;
   11305 	}
   11306 #endif
   11307 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11308 
   11309 	sc->phy.release(sc);
   11310 	return rv;
   11311 }
   11312 
   11313 /*
   11314  * wm_gmii_gs40g_readreg:	[mii interface function]
   11315  *
    11316  *	Read a PHY register on the I210 and I211.
    11317  * This could be handled by the PHY layer if we didn't have to lock the
    11318  * resource ...
   11319  */
   11320 static int
   11321 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11322 {
   11323 	struct wm_softc *sc = device_private(dev);
   11324 	int page, offset;
   11325 	int rv;
   11326 
   11327 	/* Acquire semaphore */
   11328 	if (sc->phy.acquire(sc)) {
   11329 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11330 		return -1;
   11331 	}
   11332 
   11333 	/* Page select */
   11334 	page = reg >> GS40G_PAGE_SHIFT;
   11335 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11336 	if (rv != 0)
   11337 		goto release;
   11338 
   11339 	/* Read reg */
   11340 	offset = reg & GS40G_OFFSET_MASK;
   11341 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11342 
   11343 release:
   11344 	sc->phy.release(sc);
   11345 	return rv;
   11346 }
   11347 
   11348 /*
   11349  * wm_gmii_gs40g_writereg:	[mii interface function]
   11350  *
   11351  *	Write a PHY register on the I210 and I211.
   11352  * This could be handled by the PHY layer if we didn't have to lock the
    11353  * resource ...
   11354  */
   11355 static int
   11356 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11357 {
   11358 	struct wm_softc *sc = device_private(dev);
   11359 	uint16_t page;
   11360 	int offset, rv;
   11361 
   11362 	/* Acquire semaphore */
   11363 	if (sc->phy.acquire(sc)) {
   11364 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11365 		return -1;
   11366 	}
   11367 
   11368 	/* Page select */
   11369 	page = reg >> GS40G_PAGE_SHIFT;
   11370 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11371 	if (rv != 0)
   11372 		goto release;
   11373 
   11374 	/* Write reg */
   11375 	offset = reg & GS40G_OFFSET_MASK;
   11376 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11377 
   11378 release:
   11379 	/* Release semaphore */
   11380 	sc->phy.release(sc);
   11381 	return rv;
   11382 }
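
          /*
           * Illustrative sketch: GS40G callers encode the page in the upper
           * bits of "reg", mirroring the decode above, so reading offset 4
           * on page 2 (both placeholder values) would look like:
           *
           *	uint16_t data;
           *	int rv;
           *
           *	rv = wm_gmii_gs40g_readreg(dev, phy,
           *	    (2 << GS40G_PAGE_SHIFT) | 4, &data);
           */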
   11383 
   11384 /*
   11385  * wm_gmii_statchg:	[mii interface function]
   11386  *
   11387  *	Callback from MII layer when media changes.
   11388  */
   11389 static void
   11390 wm_gmii_statchg(struct ifnet *ifp)
   11391 {
   11392 	struct wm_softc *sc = ifp->if_softc;
   11393 	struct mii_data *mii = &sc->sc_mii;
   11394 
   11395 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11396 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11397 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11398 
   11399 	/* Get flow control negotiation result. */
   11400 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11401 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11402 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11403 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11404 	}
   11405 
   11406 	if (sc->sc_flowflags & IFM_FLOW) {
   11407 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11408 			sc->sc_ctrl |= CTRL_TFCE;
   11409 			sc->sc_fcrtl |= FCRTL_XONE;
   11410 		}
   11411 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11412 			sc->sc_ctrl |= CTRL_RFCE;
   11413 	}
   11414 
   11415 	if (mii->mii_media_active & IFM_FDX) {
   11416 		DPRINTF(WM_DEBUG_LINK,
   11417 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11418 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11419 	} else {
   11420 		DPRINTF(WM_DEBUG_LINK,
   11421 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11422 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11423 	}
   11424 
   11425 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11426 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11427 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11428 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11429 	if (sc->sc_type == WM_T_80003) {
   11430 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11431 		case IFM_1000_T:
   11432 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11433 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11434 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11435 			break;
   11436 		default:
   11437 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11438 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11439 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11440 			break;
   11441 		}
   11442 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11443 	}
   11444 }
   11445 
   11446 /* kumeran related (80003, ICH* and PCH*) */
   11447 
   11448 /*
   11449  * wm_kmrn_readreg:
   11450  *
   11451  *	Read a kumeran register
   11452  */
   11453 static int
   11454 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11455 {
   11456 	int rv;
   11457 
   11458 	if (sc->sc_type == WM_T_80003)
   11459 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11460 	else
   11461 		rv = sc->phy.acquire(sc);
   11462 	if (rv != 0) {
   11463 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11464 		    __func__);
   11465 		return rv;
   11466 	}
   11467 
   11468 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11469 
   11470 	if (sc->sc_type == WM_T_80003)
   11471 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11472 	else
   11473 		sc->phy.release(sc);
   11474 
   11475 	return rv;
   11476 }
   11477 
   11478 static int
   11479 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11480 {
   11481 
   11482 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11483 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11484 	    KUMCTRLSTA_REN);
   11485 	CSR_WRITE_FLUSH(sc);
   11486 	delay(2);
   11487 
   11488 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11489 
   11490 	return 0;
   11491 }
   11492 
   11493 /*
   11494  * wm_kmrn_writereg:
   11495  *
   11496  *	Write a kumeran register
   11497  */
   11498 static int
   11499 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11500 {
   11501 	int rv;
   11502 
   11503 	if (sc->sc_type == WM_T_80003)
   11504 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11505 	else
   11506 		rv = sc->phy.acquire(sc);
   11507 	if (rv != 0) {
   11508 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11509 		    __func__);
   11510 		return rv;
   11511 	}
   11512 
   11513 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11514 
   11515 	if (sc->sc_type == WM_T_80003)
   11516 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11517 	else
   11518 		sc->phy.release(sc);
   11519 
   11520 	return rv;
   11521 }
   11522 
   11523 static int
   11524 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11525 {
   11526 
   11527 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11528 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11529 
   11530 	return 0;
   11531 }
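
          /*
           * Illustrative sketch (not part of the driver): read a kumeran
           * register and write a known value back; both wrappers pick the
           * right lock (the SWFW semaphore on 80003, the PHY lock otherwise)
           * internally:
           *
           *	uint16_t kval;
           *
           *	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &kval) != 0)
           *		return;
           *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
           *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
           */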
   11532 
   11533 /*
   11534  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11535  * This access method is different from IEEE MMD.
   11536  */
   11537 static int
   11538 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11539 {
   11540 	struct wm_softc *sc = device_private(dev);
   11541 	int rv;
   11542 
   11543 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11544 	if (rv != 0)
   11545 		return rv;
   11546 
   11547 	if (rd)
   11548 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11549 	else
   11550 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11551 	return rv;
   11552 }
   11553 
   11554 static int
   11555 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11556 {
   11557 
   11558 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11559 }
   11560 
   11561 static int
   11562 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11563 {
   11564 
   11565 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11566 }
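
          /*
           * Illustrative sketch (not part of the driver): the EMI helpers are
           * "locked" variants, so a caller must hold the PHY semaphore;
           * "emi_reg" is a placeholder for a real EMI register offset:
           *
           *	uint16_t data;
           *
           *	if (sc->phy.acquire(sc) == 0) {
           *		(void)wm_read_emi_reg_locked(dev, emi_reg, &data);
           *		sc->phy.release(sc);
           *	}
           */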
   11567 
   11568 /* SGMII related */
   11569 
   11570 /*
   11571  * wm_sgmii_uses_mdio
   11572  *
   11573  * Check whether the transaction is to the internal PHY or the external
   11574  * MDIO interface. Return true if it's MDIO.
   11575  */
   11576 static bool
   11577 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11578 {
   11579 	uint32_t reg;
   11580 	bool ismdio = false;
   11581 
   11582 	switch (sc->sc_type) {
   11583 	case WM_T_82575:
   11584 	case WM_T_82576:
   11585 		reg = CSR_READ(sc, WMREG_MDIC);
   11586 		ismdio = ((reg & MDIC_DEST) != 0);
   11587 		break;
   11588 	case WM_T_82580:
   11589 	case WM_T_I350:
   11590 	case WM_T_I354:
   11591 	case WM_T_I210:
   11592 	case WM_T_I211:
   11593 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11594 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11595 		break;
   11596 	default:
   11597 		break;
   11598 	}
   11599 
   11600 	return ismdio;
   11601 }
   11602 
   11603 /*
   11604  * wm_sgmii_readreg:	[mii interface function]
   11605  *
    11606  *	Read a PHY register on the SGMII.
    11607  * This could be handled by the PHY layer if we didn't have to lock the
    11608  * resource ...
   11609  */
   11610 static int
   11611 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11612 {
   11613 	struct wm_softc *sc = device_private(dev);
   11614 	int rv;
   11615 
   11616 	if (sc->phy.acquire(sc)) {
   11617 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11618 		return -1;
   11619 	}
   11620 
   11621 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11622 
   11623 	sc->phy.release(sc);
   11624 	return rv;
   11625 }
   11626 
   11627 static int
   11628 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11629 {
   11630 	struct wm_softc *sc = device_private(dev);
   11631 	uint32_t i2ccmd;
   11632 	int i, rv = 0;
   11633 
   11634 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11635 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11636 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11637 
   11638 	/* Poll the ready bit */
   11639 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11640 		delay(50);
   11641 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11642 		if (i2ccmd & I2CCMD_READY)
   11643 			break;
   11644 	}
   11645 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11646 		device_printf(dev, "I2CCMD Read did not complete\n");
   11647 		rv = ETIMEDOUT;
   11648 	}
   11649 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11650 		device_printf(dev, "I2CCMD Error bit set\n");
   11651 		rv = EIO;
   11652 	}
   11653 
   11654 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11655 
   11656 	return rv;
   11657 }
   11658 
   11659 /*
   11660  * wm_sgmii_writereg:	[mii interface function]
   11661  *
   11662  *	Write a PHY register on the SGMII.
   11663  * This could be handled by the PHY layer if we didn't have to lock the
    11664  * resource ...
   11665  */
   11666 static int
   11667 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11668 {
   11669 	struct wm_softc *sc = device_private(dev);
   11670 	int rv;
   11671 
   11672 	if (sc->phy.acquire(sc) != 0) {
   11673 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11674 		return -1;
   11675 	}
   11676 
   11677 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11678 
   11679 	sc->phy.release(sc);
   11680 
   11681 	return rv;
   11682 }
   11683 
   11684 static int
   11685 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11686 {
   11687 	struct wm_softc *sc = device_private(dev);
   11688 	uint32_t i2ccmd;
   11689 	uint16_t swapdata;
   11690 	int rv = 0;
   11691 	int i;
   11692 
   11693 	/* Swap the data bytes for the I2C interface */
   11694 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11695 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11696 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11697 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11698 
   11699 	/* Poll the ready bit */
   11700 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11701 		delay(50);
   11702 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11703 		if (i2ccmd & I2CCMD_READY)
   11704 			break;
   11705 	}
   11706 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11707 		device_printf(dev, "I2CCMD Write did not complete\n");
   11708 		rv = ETIMEDOUT;
   11709 	}
   11710 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11711 		device_printf(dev, "I2CCMD Error bit set\n");
   11712 		rv = EIO;
   11713 	}
   11714 
   11715 	return rv;
   11716 }
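
          /*
           * Worked example of the I2C byte swap above: a PHY register value
           * of 0x1234 is transferred over I2C low byte first, so it arrives
           * as 0x3412 in the low 16 bits of I2CCMD.  The read path computes
           * ((0x3412 >> 8) & 0x00ff) | ((0x3412 << 8) & 0xff00) = 0x1234,
           * and the write path performs the same swap before the transfer.
           */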
   11717 
   11718 /* TBI related */
   11719 
   11720 static bool
   11721 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11722 {
   11723 	bool sig;
   11724 
   11725 	sig = ctrl & CTRL_SWDPIN(1);
   11726 
   11727 	/*
   11728 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11729 	 * detect a signal, 1 if they don't.
   11730 	 */
   11731 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11732 		sig = !sig;
   11733 
   11734 	return sig;
   11735 }
   11736 
   11737 /*
   11738  * wm_tbi_mediainit:
   11739  *
   11740  *	Initialize media for use on 1000BASE-X devices.
   11741  */
   11742 static void
   11743 wm_tbi_mediainit(struct wm_softc *sc)
   11744 {
   11745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11746 	const char *sep = "";
   11747 
   11748 	if (sc->sc_type < WM_T_82543)
   11749 		sc->sc_tipg = TIPG_WM_DFLT;
   11750 	else
   11751 		sc->sc_tipg = TIPG_LG_DFLT;
   11752 
   11753 	sc->sc_tbi_serdes_anegticks = 5;
   11754 
   11755 	/* Initialize our media structures */
   11756 	sc->sc_mii.mii_ifp = ifp;
   11757 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11758 
   11759 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11760 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11761 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11762 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11763 	else
   11764 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11765 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11766 
   11767 	/*
   11768 	 * SWD Pins:
   11769 	 *
   11770 	 *	0 = Link LED (output)
   11771 	 *	1 = Loss Of Signal (input)
   11772 	 */
   11773 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11774 
   11775 	/* XXX Perhaps this is only for TBI */
   11776 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11777 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11778 
   11779 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11780 		sc->sc_ctrl &= ~CTRL_LRST;
   11781 
   11782 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11783 
   11784 #define	ADD(ss, mm, dd)							\
   11785 do {									\
   11786 	aprint_normal("%s%s", sep, ss);					\
   11787 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11788 	sep = ", ";							\
   11789 } while (/*CONSTCOND*/0)
   11790 
   11791 	aprint_normal_dev(sc->sc_dev, "");
   11792 
   11793 	if (sc->sc_type == WM_T_I354) {
   11794 		uint32_t status;
   11795 
   11796 		status = CSR_READ(sc, WMREG_STATUS);
   11797 		if (((status & STATUS_2P5_SKU) != 0)
   11798 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11799 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11800 		} else
   11801 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11802 	} else if (sc->sc_type == WM_T_82545) {
   11803 		/* Only 82545 is LX (XXX except SFP) */
   11804 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11805 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11806 	} else {
   11807 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11808 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11809 	}
   11810 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11811 	aprint_normal("\n");
   11812 
   11813 #undef ADD
   11814 
   11815 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11816 }
   11817 
   11818 /*
   11819  * wm_tbi_mediachange:	[ifmedia interface function]
   11820  *
   11821  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11822  */
   11823 static int
   11824 wm_tbi_mediachange(struct ifnet *ifp)
   11825 {
   11826 	struct wm_softc *sc = ifp->if_softc;
   11827 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11828 	uint32_t status, ctrl;
   11829 	bool signal;
   11830 	int i;
   11831 
   11832 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11833 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11834 		/* XXX need some work for >= 82571 and < 82575 */
   11835 		if (sc->sc_type < WM_T_82575)
   11836 			return 0;
   11837 	}
   11838 
   11839 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11840 	    || (sc->sc_type >= WM_T_82575))
   11841 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11842 
   11843 	sc->sc_ctrl &= ~CTRL_LRST;
   11844 	sc->sc_txcw = TXCW_ANE;
   11845 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11846 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11847 	else if (ife->ifm_media & IFM_FDX)
   11848 		sc->sc_txcw |= TXCW_FD;
   11849 	else
   11850 		sc->sc_txcw |= TXCW_HD;
   11851 
   11852 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11853 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11854 
   11855 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11856 		device_xname(sc->sc_dev), sc->sc_txcw));
   11857 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11858 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11859 	CSR_WRITE_FLUSH(sc);
   11860 	delay(1000);
   11861 
   11862 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11863 	signal = wm_tbi_havesignal(sc, ctrl);
   11864 
   11865 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11866 		signal));
   11867 
   11868 	if (signal) {
   11869 		/* Have signal; wait for the link to come up. */
   11870 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11871 			delay(10000);
   11872 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11873 				break;
   11874 		}
   11875 
   11876 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11877 			device_xname(sc->sc_dev), i));
   11878 
   11879 		status = CSR_READ(sc, WMREG_STATUS);
   11880 		DPRINTF(WM_DEBUG_LINK,
   11881 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11882 			device_xname(sc->sc_dev), status, STATUS_LU));
   11883 		if (status & STATUS_LU) {
   11884 			/* Link is up. */
   11885 			DPRINTF(WM_DEBUG_LINK,
   11886 			    ("%s: LINK: set media -> link up %s\n",
   11887 				device_xname(sc->sc_dev),
   11888 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11889 
   11890 			/*
    11891 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11892 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   11893 			 */
   11894 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11895 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11896 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11897 			if (status & STATUS_FD)
   11898 				sc->sc_tctl |=
   11899 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11900 			else
   11901 				sc->sc_tctl |=
   11902 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11903 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11904 				sc->sc_fcrtl |= FCRTL_XONE;
   11905 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11906 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11907 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11908 			sc->sc_tbi_linkup = 1;
   11909 		} else {
   11910 			if (i == WM_LINKUP_TIMEOUT)
   11911 				wm_check_for_link(sc);
   11912 			/* Link is down. */
   11913 			DPRINTF(WM_DEBUG_LINK,
   11914 			    ("%s: LINK: set media -> link down\n",
   11915 				device_xname(sc->sc_dev)));
   11916 			sc->sc_tbi_linkup = 0;
   11917 		}
   11918 	} else {
   11919 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11920 			device_xname(sc->sc_dev)));
   11921 		sc->sc_tbi_linkup = 0;
   11922 	}
   11923 
   11924 	wm_tbi_serdes_set_linkled(sc);
   11925 
   11926 	return 0;
   11927 }
   11928 
   11929 /*
   11930  * wm_tbi_mediastatus:	[ifmedia interface function]
   11931  *
   11932  *	Get the current interface media status on a 1000BASE-X device.
   11933  */
   11934 static void
   11935 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11936 {
   11937 	struct wm_softc *sc = ifp->if_softc;
   11938 	uint32_t ctrl, status;
   11939 
   11940 	ifmr->ifm_status = IFM_AVALID;
   11941 	ifmr->ifm_active = IFM_ETHER;
   11942 
   11943 	status = CSR_READ(sc, WMREG_STATUS);
   11944 	if ((status & STATUS_LU) == 0) {
   11945 		ifmr->ifm_active |= IFM_NONE;
   11946 		return;
   11947 	}
   11948 
   11949 	ifmr->ifm_status |= IFM_ACTIVE;
   11950 	/* Only 82545 is LX */
   11951 	if (sc->sc_type == WM_T_82545)
   11952 		ifmr->ifm_active |= IFM_1000_LX;
   11953 	else
   11954 		ifmr->ifm_active |= IFM_1000_SX;
   11955 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11956 		ifmr->ifm_active |= IFM_FDX;
   11957 	else
   11958 		ifmr->ifm_active |= IFM_HDX;
   11959 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11960 	if (ctrl & CTRL_RFCE)
   11961 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11962 	if (ctrl & CTRL_TFCE)
   11963 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11964 }
   11965 
   11966 /* XXX TBI only */
   11967 static int
   11968 wm_check_for_link(struct wm_softc *sc)
   11969 {
   11970 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11971 	uint32_t rxcw;
   11972 	uint32_t ctrl;
   11973 	uint32_t status;
   11974 	bool signal;
   11975 
   11976 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11977 		device_xname(sc->sc_dev), __func__));
   11978 
   11979 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11980 		/* XXX need some work for >= 82571 */
   11981 		if (sc->sc_type >= WM_T_82571) {
   11982 			sc->sc_tbi_linkup = 1;
   11983 			return 0;
   11984 		}
   11985 	}
   11986 
   11987 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11988 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11989 	status = CSR_READ(sc, WMREG_STATUS);
   11990 	signal = wm_tbi_havesignal(sc, ctrl);
   11991 
   11992 	DPRINTF(WM_DEBUG_LINK,
   11993 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11994 		device_xname(sc->sc_dev), __func__, signal,
   11995 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11996 
   11997 	/*
   11998 	 * SWDPIN   LU RXCW
   11999 	 *	0    0	  0
   12000 	 *	0    0	  1	(should not happen)
   12001 	 *	0    1	  0	(should not happen)
   12002 	 *	0    1	  1	(should not happen)
   12003 	 *	1    0	  0	Disable autonego and force linkup
   12004 	 *	1    0	  1	got /C/ but not linkup yet
   12005 	 *	1    1	  0	(linkup)
   12006 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12007 	 *
   12008 	 */
   12009 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12010 		DPRINTF(WM_DEBUG_LINK,
   12011 		    ("%s: %s: force linkup and fullduplex\n",
   12012 			device_xname(sc->sc_dev), __func__));
   12013 		sc->sc_tbi_linkup = 0;
   12014 		/* Disable auto-negotiation in the TXCW register */
   12015 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12016 
   12017 		/*
   12018 		 * Force link-up and also force full-duplex.
   12019 		 *
    12020 		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
    12021 		 * automatically, so update sc->sc_ctrl from the value read.
   12022 		 */
   12023 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12024 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12025 	} else if (((status & STATUS_LU) != 0)
   12026 	    && ((rxcw & RXCW_C) != 0)
   12027 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12028 		sc->sc_tbi_linkup = 1;
   12029 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12030 			device_xname(sc->sc_dev),
   12031 			__func__));
   12032 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12033 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12034 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12035 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12036 			device_xname(sc->sc_dev), __func__));
   12037 	} else {
   12038 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12039 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12040 			status));
   12041 	}
   12042 
   12043 	return 0;
   12044 }
   12045 
   12046 /*
   12047  * wm_tbi_tick:
   12048  *
   12049  *	Check the link on TBI devices.
   12050  *	This function acts as mii_tick().
   12051  */
   12052 static void
   12053 wm_tbi_tick(struct wm_softc *sc)
   12054 {
   12055 	struct mii_data *mii = &sc->sc_mii;
   12056 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12057 	uint32_t status;
   12058 
   12059 	KASSERT(WM_CORE_LOCKED(sc));
   12060 
   12061 	status = CSR_READ(sc, WMREG_STATUS);
   12062 
   12063 	/* XXX is this needed? */
   12064 	(void)CSR_READ(sc, WMREG_RXCW);
   12065 	(void)CSR_READ(sc, WMREG_CTRL);
   12066 
   12067 	/* set link status */
   12068 	if ((status & STATUS_LU) == 0) {
   12069 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12070 			device_xname(sc->sc_dev)));
   12071 		sc->sc_tbi_linkup = 0;
   12072 	} else if (sc->sc_tbi_linkup == 0) {
   12073 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12074 			device_xname(sc->sc_dev),
   12075 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12076 		sc->sc_tbi_linkup = 1;
   12077 		sc->sc_tbi_serdes_ticks = 0;
   12078 	}
   12079 
   12080 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12081 		goto setled;
   12082 
   12083 	if ((status & STATUS_LU) == 0) {
   12084 		sc->sc_tbi_linkup = 0;
   12085 		/* If the timer expired, retry autonegotiation */
   12086 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12087 		    && (++sc->sc_tbi_serdes_ticks
   12088 			>= sc->sc_tbi_serdes_anegticks)) {
   12089 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12090 				device_xname(sc->sc_dev), __func__));
   12091 			sc->sc_tbi_serdes_ticks = 0;
   12092 			/*
   12093 			 * Reset the link, and let autonegotiation do
   12094 			 * its thing
   12095 			 */
   12096 			sc->sc_ctrl |= CTRL_LRST;
   12097 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12098 			CSR_WRITE_FLUSH(sc);
   12099 			delay(1000);
   12100 			sc->sc_ctrl &= ~CTRL_LRST;
   12101 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12102 			CSR_WRITE_FLUSH(sc);
   12103 			delay(1000);
   12104 			CSR_WRITE(sc, WMREG_TXCW,
   12105 			    sc->sc_txcw & ~TXCW_ANE);
   12106 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12107 		}
   12108 	}
   12109 
   12110 setled:
   12111 	wm_tbi_serdes_set_linkled(sc);
   12112 }
   12113 
   12114 /* SERDES related */
   12115 static void
   12116 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12117 {
   12118 	uint32_t reg;
   12119 
   12120 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12121 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12122 		return;
   12123 
   12124 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12125 	reg |= PCS_CFG_PCS_EN;
   12126 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12127 
   12128 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12129 	reg &= ~CTRL_EXT_SWDPIN(3);
   12130 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12131 	CSR_WRITE_FLUSH(sc);
   12132 }
   12133 
   12134 static int
   12135 wm_serdes_mediachange(struct ifnet *ifp)
   12136 {
   12137 	struct wm_softc *sc = ifp->if_softc;
   12138 	bool pcs_autoneg = true; /* XXX */
   12139 	uint32_t ctrl_ext, pcs_lctl, reg;
   12140 
   12141 	/* XXX Currently, this function is not called on 8257[12] */
   12142 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12143 	    || (sc->sc_type >= WM_T_82575))
   12144 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12145 
   12146 	wm_serdes_power_up_link_82575(sc);
   12147 
   12148 	sc->sc_ctrl |= CTRL_SLU;
   12149 
   12150 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12151 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12152 
   12153 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12154 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12155 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12156 	case CTRL_EXT_LINK_MODE_SGMII:
   12157 		pcs_autoneg = true;
   12158 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12159 		break;
   12160 	case CTRL_EXT_LINK_MODE_1000KX:
   12161 		pcs_autoneg = false;
   12162 		/* FALLTHROUGH */
   12163 	default:
   12164 		if ((sc->sc_type == WM_T_82575)
   12165 		    || (sc->sc_type == WM_T_82576)) {
   12166 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12167 				pcs_autoneg = false;
   12168 		}
   12169 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12170 		    | CTRL_FRCFDX;
   12171 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12172 	}
   12173 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12174 
   12175 	if (pcs_autoneg) {
   12176 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12177 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12178 
   12179 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12180 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12181 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12182 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12183 	} else
   12184 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12185 
   12186 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    12187 
   12189 	return 0;
   12190 }
   12191 
   12192 static void
   12193 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12194 {
   12195 	struct wm_softc *sc = ifp->if_softc;
   12196 	struct mii_data *mii = &sc->sc_mii;
   12197 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12198 	uint32_t pcs_adv, pcs_lpab, reg;
   12199 
   12200 	ifmr->ifm_status = IFM_AVALID;
   12201 	ifmr->ifm_active = IFM_ETHER;
   12202 
   12203 	/* Check PCS */
   12204 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12205 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12206 		ifmr->ifm_active |= IFM_NONE;
   12207 		sc->sc_tbi_linkup = 0;
   12208 		goto setled;
   12209 	}
   12210 
   12211 	sc->sc_tbi_linkup = 1;
   12212 	ifmr->ifm_status |= IFM_ACTIVE;
   12213 	if (sc->sc_type == WM_T_I354) {
   12214 		uint32_t status;
   12215 
   12216 		status = CSR_READ(sc, WMREG_STATUS);
   12217 		if (((status & STATUS_2P5_SKU) != 0)
   12218 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12219 			ifmr->ifm_active |= IFM_2500_KX;
   12220 		} else
   12221 			ifmr->ifm_active |= IFM_1000_KX;
   12222 	} else {
   12223 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12224 		case PCS_LSTS_SPEED_10:
   12225 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12226 			break;
   12227 		case PCS_LSTS_SPEED_100:
   12228 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12229 			break;
   12230 		case PCS_LSTS_SPEED_1000:
   12231 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12232 			break;
   12233 		default:
   12234 			device_printf(sc->sc_dev, "Unknown speed\n");
   12235 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12236 			break;
   12237 		}
   12238 	}
   12239 	if ((reg & PCS_LSTS_FDX) != 0)
   12240 		ifmr->ifm_active |= IFM_FDX;
   12241 	else
   12242 		ifmr->ifm_active |= IFM_HDX;
   12243 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12244 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12245 		/* Check flow */
   12246 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12247 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12248 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12249 			goto setled;
   12250 		}
   12251 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12252 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12253 		DPRINTF(WM_DEBUG_LINK,
   12254 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12255 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12256 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12257 			mii->mii_media_active |= IFM_FLOW
   12258 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12259 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12260 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12261 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12262 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12263 			mii->mii_media_active |= IFM_FLOW
   12264 			    | IFM_ETH_TXPAUSE;
   12265 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12266 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12267 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12268 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12269 			mii->mii_media_active |= IFM_FLOW
   12270 			    | IFM_ETH_RXPAUSE;
   12271 		}
   12272 	}
   12273 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12274 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12275 setled:
   12276 	wm_tbi_serdes_set_linkled(sc);
   12277 }
   12278 
   12279 /*
   12280  * wm_serdes_tick:
   12281  *
   12282  *	Check the link on serdes devices.
   12283  */
   12284 static void
   12285 wm_serdes_tick(struct wm_softc *sc)
   12286 {
   12287 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12288 	struct mii_data *mii = &sc->sc_mii;
   12289 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12290 	uint32_t reg;
   12291 
   12292 	KASSERT(WM_CORE_LOCKED(sc));
   12293 
   12294 	mii->mii_media_status = IFM_AVALID;
   12295 	mii->mii_media_active = IFM_ETHER;
   12296 
   12297 	/* Check PCS */
   12298 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12299 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12300 		mii->mii_media_status |= IFM_ACTIVE;
   12301 		sc->sc_tbi_linkup = 1;
   12302 		sc->sc_tbi_serdes_ticks = 0;
   12303 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12304 		if ((reg & PCS_LSTS_FDX) != 0)
   12305 			mii->mii_media_active |= IFM_FDX;
   12306 		else
   12307 			mii->mii_media_active |= IFM_HDX;
   12308 	} else {
    12309 		mii->mii_media_active |= IFM_NONE;
   12310 		sc->sc_tbi_linkup = 0;
   12311 		/* If the timer expired, retry autonegotiation */
   12312 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12313 		    && (++sc->sc_tbi_serdes_ticks
   12314 			>= sc->sc_tbi_serdes_anegticks)) {
   12315 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12316 				device_xname(sc->sc_dev), __func__));
   12317 			sc->sc_tbi_serdes_ticks = 0;
   12318 			/* XXX */
   12319 			wm_serdes_mediachange(ifp);
   12320 		}
   12321 	}
   12322 
   12323 	wm_tbi_serdes_set_linkled(sc);
   12324 }
   12325 
   12326 /* SFP related */
   12327 
   12328 static int
   12329 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12330 {
   12331 	uint32_t i2ccmd;
   12332 	int i;
   12333 
   12334 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12335 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12336 
   12337 	/* Poll the ready bit */
   12338 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12339 		delay(50);
   12340 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12341 		if (i2ccmd & I2CCMD_READY)
   12342 			break;
   12343 	}
   12344 	if ((i2ccmd & I2CCMD_READY) == 0)
   12345 		return -1;
   12346 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12347 		return -1;
   12348 
   12349 	*data = i2ccmd & 0x00ff;
   12350 
   12351 	return 0;
   12352 }
   12353 
   12354 static uint32_t
   12355 wm_sfp_get_media_type(struct wm_softc *sc)
   12356 {
   12357 	uint32_t ctrl_ext;
   12358 	uint8_t val = 0;
   12359 	int timeout = 3;
   12360 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12361 	int rv = -1;
   12362 
   12363 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12364 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12365 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12366 	CSR_WRITE_FLUSH(sc);
   12367 
   12368 	/* Read SFP module data */
   12369 	while (timeout) {
   12370 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12371 		if (rv == 0)
   12372 			break;
   12373 		delay(100*1000); /* XXX too big */
   12374 		timeout--;
   12375 	}
   12376 	if (rv != 0)
   12377 		goto out;
   12378 
   12379 	switch (val) {
   12380 	case SFF_SFP_ID_SFF:
   12381 		aprint_normal_dev(sc->sc_dev,
   12382 		    "Module/Connector soldered to board\n");
   12383 		break;
   12384 	case SFF_SFP_ID_SFP:
   12385 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12386 		break;
   12387 	case SFF_SFP_ID_UNKNOWN:
   12388 		goto out;
   12389 	default:
   12390 		break;
   12391 	}
   12392 
   12393 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12394 	if (rv != 0)
   12395 		goto out;
   12396 
   12397 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12398 		mediatype = WM_MEDIATYPE_SERDES;
   12399 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12400 		sc->sc_flags |= WM_F_SGMII;
   12401 		mediatype = WM_MEDIATYPE_COPPER;
   12402 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12403 		sc->sc_flags |= WM_F_SGMII;
   12404 		mediatype = WM_MEDIATYPE_SERDES;
   12405 	}
   12406 
   12407 out:
   12408 	/* Restore I2C interface setting */
   12409 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12410 
   12411 	return mediatype;
   12412 }
   12413 
   12414 /*
   12415  * NVM related.
    12416  * Microwire, SPI (with or without EERD) and Flash.
   12417  */
   12418 
   12419 /* Both spi and uwire */
   12420 
   12421 /*
   12422  * wm_eeprom_sendbits:
   12423  *
   12424  *	Send a series of bits to the EEPROM.
   12425  */
   12426 static void
   12427 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12428 {
   12429 	uint32_t reg;
   12430 	int x;
   12431 
   12432 	reg = CSR_READ(sc, WMREG_EECD);
   12433 
   12434 	for (x = nbits; x > 0; x--) {
   12435 		if (bits & (1U << (x - 1)))
   12436 			reg |= EECD_DI;
   12437 		else
   12438 			reg &= ~EECD_DI;
   12439 		CSR_WRITE(sc, WMREG_EECD, reg);
   12440 		CSR_WRITE_FLUSH(sc);
   12441 		delay(2);
   12442 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12443 		CSR_WRITE_FLUSH(sc);
   12444 		delay(2);
   12445 		CSR_WRITE(sc, WMREG_EECD, reg);
   12446 		CSR_WRITE_FLUSH(sc);
   12447 		delay(2);
   12448 	}
   12449 }
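
          /*
           * Worked example: bits are shifted out MSB first, with DI set up
           * before each rising SK edge.  Assuming UWIRE_OPC_READ is the
           * standard Microwire READ opcode 110b, wm_eeprom_sendbits(sc,
           * UWIRE_OPC_READ, 3) clocks out 1, 1, 0 on three SK pulses.
           */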
   12450 
   12451 /*
   12452  * wm_eeprom_recvbits:
   12453  *
   12454  *	Receive a series of bits from the EEPROM.
   12455  */
   12456 static void
   12457 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12458 {
   12459 	uint32_t reg, val;
   12460 	int x;
   12461 
   12462 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12463 
   12464 	val = 0;
   12465 	for (x = nbits; x > 0; x--) {
   12466 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12467 		CSR_WRITE_FLUSH(sc);
   12468 		delay(2);
   12469 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12470 			val |= (1U << (x - 1));
   12471 		CSR_WRITE(sc, WMREG_EECD, reg);
   12472 		CSR_WRITE_FLUSH(sc);
   12473 		delay(2);
   12474 	}
   12475 	*valp = val;
   12476 }
   12477 
   12478 /* Microwire */
   12479 
   12480 /*
   12481  * wm_nvm_read_uwire:
   12482  *
   12483  *	Read a word from the EEPROM using the MicroWire protocol.
   12484  */
   12485 static int
   12486 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12487 {
   12488 	uint32_t reg, val;
   12489 	int i;
   12490 
   12491 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12492 		device_xname(sc->sc_dev), __func__));
   12493 
   12494 	if (sc->nvm.acquire(sc) != 0)
   12495 		return -1;
   12496 
   12497 	for (i = 0; i < wordcnt; i++) {
   12498 		/* Clear SK and DI. */
   12499 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12500 		CSR_WRITE(sc, WMREG_EECD, reg);
   12501 
   12502 		/*
   12503 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12504 		 * and Xen.
   12505 		 *
   12506 		 * We use this workaround only for 82540 because qemu's
   12507 		 * e1000 act as 82540.
   12508 		 */
   12509 		if (sc->sc_type == WM_T_82540) {
   12510 			reg |= EECD_SK;
   12511 			CSR_WRITE(sc, WMREG_EECD, reg);
   12512 			reg &= ~EECD_SK;
   12513 			CSR_WRITE(sc, WMREG_EECD, reg);
   12514 			CSR_WRITE_FLUSH(sc);
   12515 			delay(2);
   12516 		}
   12517 		/* XXX: end of workaround */
   12518 
   12519 		/* Set CHIP SELECT. */
   12520 		reg |= EECD_CS;
   12521 		CSR_WRITE(sc, WMREG_EECD, reg);
   12522 		CSR_WRITE_FLUSH(sc);
   12523 		delay(2);
   12524 
   12525 		/* Shift in the READ command. */
   12526 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12527 
   12528 		/* Shift in address. */
   12529 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12530 
   12531 		/* Shift out the data. */
   12532 		wm_eeprom_recvbits(sc, &val, 16);
   12533 		data[i] = val & 0xffff;
   12534 
   12535 		/* Clear CHIP SELECT. */
   12536 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12537 		CSR_WRITE(sc, WMREG_EECD, reg);
   12538 		CSR_WRITE_FLUSH(sc);
   12539 		delay(2);
   12540 	}
   12541 
   12542 	sc->nvm.release(sc);
   12543 	return 0;
   12544 }
   12545 
   12546 /* SPI */
   12547 
   12548 /*
   12549  * Set SPI and FLASH related information from the EECD register.
   12550  * For 82541 and 82547, the word size is taken from EEPROM.
   12551  */
   12552 static int
   12553 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12554 {
   12555 	int size;
   12556 	uint32_t reg;
   12557 	uint16_t data;
   12558 
   12559 	reg = CSR_READ(sc, WMREG_EECD);
   12560 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12561 
   12562 	/* Read the size of NVM from EECD by default */
   12563 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12564 	switch (sc->sc_type) {
   12565 	case WM_T_82541:
   12566 	case WM_T_82541_2:
   12567 	case WM_T_82547:
   12568 	case WM_T_82547_2:
   12569 		/* Set dummy value to access EEPROM */
   12570 		sc->sc_nvm_wordsize = 64;
   12571 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12572 			aprint_error_dev(sc->sc_dev,
   12573 			    "%s: failed to read EEPROM size\n", __func__);
   12574 		}
   12575 		reg = data;
   12576 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12577 		if (size == 0)
   12578 			size = 6; /* 64 word size */
   12579 		else
   12580 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12581 		break;
   12582 	case WM_T_80003:
   12583 	case WM_T_82571:
   12584 	case WM_T_82572:
   12585 	case WM_T_82573: /* SPI case */
   12586 	case WM_T_82574: /* SPI case */
   12587 	case WM_T_82583: /* SPI case */
   12588 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12589 		if (size > 14)
   12590 			size = 14;
   12591 		break;
   12592 	case WM_T_82575:
   12593 	case WM_T_82576:
   12594 	case WM_T_82580:
   12595 	case WM_T_I350:
   12596 	case WM_T_I354:
   12597 	case WM_T_I210:
   12598 	case WM_T_I211:
   12599 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12600 		if (size > 15)
   12601 			size = 15;
   12602 		break;
   12603 	default:
   12604 		aprint_error_dev(sc->sc_dev,
    12605 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    12606 		return -1;
   12608 	}
   12609 
   12610 	sc->sc_nvm_wordsize = 1 << size;
   12611 
   12612 	return 0;
   12613 }
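
          /*
           * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
           * Intel-derived headers): an 82571 whose EECD size field reads 2
           * gets size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
           * The 64-word fallback for 82541/82547 corresponds to size = 6.
           */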
   12614 
   12615 /*
   12616  * wm_nvm_ready_spi:
   12617  *
   12618  *	Wait for a SPI EEPROM to be ready for commands.
   12619  */
   12620 static int
   12621 wm_nvm_ready_spi(struct wm_softc *sc)
   12622 {
   12623 	uint32_t val;
   12624 	int usec;
   12625 
   12626 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12627 		device_xname(sc->sc_dev), __func__));
   12628 
   12629 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12630 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12631 		wm_eeprom_recvbits(sc, &val, 8);
   12632 		if ((val & SPI_SR_RDY) == 0)
   12633 			break;
   12634 	}
   12635 	if (usec >= SPI_MAX_RETRIES) {
   12636 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12637 		return -1;
   12638 	}
   12639 	return 0;
   12640 }
   12641 
   12642 /*
   12643  * wm_nvm_read_spi:
   12644  *
    12645  *	Read a word from the EEPROM using the SPI protocol.
   12646  */
   12647 static int
   12648 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12649 {
   12650 	uint32_t reg, val;
   12651 	int i;
   12652 	uint8_t opc;
   12653 	int rv = 0;
   12654 
   12655 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12656 		device_xname(sc->sc_dev), __func__));
   12657 
   12658 	if (sc->nvm.acquire(sc) != 0)
   12659 		return -1;
   12660 
   12661 	/* Clear SK and CS. */
   12662 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12663 	CSR_WRITE(sc, WMREG_EECD, reg);
   12664 	CSR_WRITE_FLUSH(sc);
   12665 	delay(2);
   12666 
   12667 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12668 		goto out;
   12669 
   12670 	/* Toggle CS to flush commands. */
   12671 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12672 	CSR_WRITE_FLUSH(sc);
   12673 	delay(2);
   12674 	CSR_WRITE(sc, WMREG_EECD, reg);
   12675 	CSR_WRITE_FLUSH(sc);
   12676 	delay(2);
   12677 
   12678 	opc = SPI_OPC_READ;
   12679 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12680 		opc |= SPI_OPC_A8;
   12681 
   12682 	wm_eeprom_sendbits(sc, opc, 8);
   12683 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12684 
   12685 	for (i = 0; i < wordcnt; i++) {
   12686 		wm_eeprom_recvbits(sc, &val, 16);
   12687 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12688 	}
   12689 
   12690 	/* Raise CS and clear SK. */
   12691 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12692 	CSR_WRITE(sc, WMREG_EECD, reg);
   12693 	CSR_WRITE_FLUSH(sc);
   12694 	delay(2);
   12695 
   12696 out:
   12697 	sc->nvm.release(sc);
   12698 	return rv;
   12699 }
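
          /*
           * Note on the SPI_OPC_A8 case above: an 8-bit-address part covers
           * byte addresses 0-255, i.e. words 0-127 (the word index is
           * shifted left by one to form a byte address).  Word 128 and above
           * need a ninth address bit, which these parts carry in the A8 bit
           * of the opcode itself.
           */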
   12700 
   12701 /* Using with EERD */
   12702 
   12703 static int
   12704 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12705 {
   12706 	uint32_t attempts = 100000;
   12707 	uint32_t i, reg = 0;
   12708 	int32_t done = -1;
   12709 
   12710 	for (i = 0; i < attempts; i++) {
   12711 		reg = CSR_READ(sc, rw);
   12712 
   12713 		if (reg & EERD_DONE) {
   12714 			done = 0;
   12715 			break;
   12716 		}
   12717 		delay(5);
   12718 	}
   12719 
   12720 	return done;
   12721 }
   12722 
   12723 static int
   12724 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12725 {
   12726 	int i, eerd = 0;
   12727 	int rv = 0;
   12728 
   12729 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12730 		device_xname(sc->sc_dev), __func__));
   12731 
   12732 	if (sc->nvm.acquire(sc) != 0)
   12733 		return -1;
   12734 
   12735 	for (i = 0; i < wordcnt; i++) {
   12736 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12737 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12738 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12739 		if (rv != 0) {
   12740 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12741 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12742 			break;
   12743 		}
   12744 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12745 	}
   12746 
   12747 	sc->nvm.release(sc);
   12748 	return rv;
   12749 }
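
          /*
           * Illustrative sketch (not part of the driver): callers normally go
           * through wm_nvm_read(), which dispatches to the method-specific
           * reader (uwire, SPI, EERD or flash) for the chip at hand; the
           * offset 0 used here is only a placeholder:
           *
           *	uint16_t words[3];
           *
           *	if (wm_nvm_read(sc, 0, __arraycount(words), words) != 0)
           *		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
           */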
   12750 
   12751 /* Flash */
   12752 
   12753 static int
   12754 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12755 {
   12756 	uint32_t eecd;
   12757 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12758 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12759 	uint32_t nvm_dword = 0;
   12760 	uint8_t sig_byte = 0;
   12761 	int rv;
   12762 
   12763 	switch (sc->sc_type) {
   12764 	case WM_T_PCH_SPT:
   12765 	case WM_T_PCH_CNP:
   12766 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12767 		act_offset = ICH_NVM_SIG_WORD * 2;
   12768 
   12769 		/* Set bank to 0 in case flash read fails. */
   12770 		*bank = 0;
   12771 
   12772 		/* Check bank 0 */
   12773 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12774 		if (rv != 0)
   12775 			return rv;
   12776 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12777 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12778 			*bank = 0;
   12779 			return 0;
   12780 		}
   12781 
   12782 		/* Check bank 1 */
    12783 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12784 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12785 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12786 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12787 			*bank = 1;
   12788 			return 0;
   12789 		}
   12790 		aprint_error_dev(sc->sc_dev,
   12791 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12792 		return -1;
   12793 	case WM_T_ICH8:
   12794 	case WM_T_ICH9:
   12795 		eecd = CSR_READ(sc, WMREG_EECD);
   12796 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12797 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12798 			return 0;
   12799 		}
   12800 		/* FALLTHROUGH */
   12801 	default:
   12802 		/* Default to 0 */
   12803 		*bank = 0;
   12804 
   12805 		/* Check bank 0 */
   12806 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12807 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12808 			*bank = 0;
   12809 			return 0;
   12810 		}
   12811 
   12812 		/* Check bank 1 */
   12813 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12814 		    &sig_byte);
   12815 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12816 			*bank = 1;
   12817 			return 0;
   12818 		}
   12819 	}
   12820 
   12821 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12822 		device_xname(sc->sc_dev)));
   12823 	return -1;
   12824 }
   12825 
   12826 /******************************************************************************
   12827  * This function does initial flash setup so that a new read/write/erase cycle
   12828  * can be started.
   12829  *
   12830  * sc - The pointer to the hw structure
   12831  ****************************************************************************/
   12832 static int32_t
   12833 wm_ich8_cycle_init(struct wm_softc *sc)
   12834 {
   12835 	uint16_t hsfsts;
   12836 	int32_t error = 1;
   12837 	int32_t i     = 0;
   12838 
   12839 	if (sc->sc_type >= WM_T_PCH_SPT)
   12840 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12841 	else
   12842 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12843 
    12844 	/* Check the Flash Descriptor Valid bit in HW status */
   12845 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12846 		return error;
   12847 
    12848 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12850 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12851 
   12852 	if (sc->sc_type >= WM_T_PCH_SPT)
   12853 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12854 	else
   12855 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12856 
   12857 	/*
    12858 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12859 	 * check against before starting a new cycle, or the FDONE bit
    12860 	 * should be changed in the hardware so that it reads 1 after a
    12861 	 * hardware reset, which could then indicate whether a cycle is in
    12862 	 * progress or has completed.  We should also have some software
    12863 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit,
    12864 	 * so that accesses by two threads are serialized and two threads
    12865 	 * cannot start a cycle at the same time.
   12866 	 */
   12867 
   12868 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12869 		/*
   12870 		 * There is no cycle running at present, so we can start a
   12871 		 * cycle
   12872 		 */
   12873 
   12874 		/* Begin by setting Flash Cycle Done. */
   12875 		hsfsts |= HSFSTS_DONE;
   12876 		if (sc->sc_type >= WM_T_PCH_SPT)
   12877 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12878 			    hsfsts & 0xffffUL);
   12879 		else
   12880 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12881 		error = 0;
   12882 	} else {
   12883 		/*
		 * Otherwise poll for some time so the current cycle has a
   12885 		 * chance to end before giving up.
   12886 		 */
   12887 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12888 			if (sc->sc_type >= WM_T_PCH_SPT)
   12889 				hsfsts = ICH8_FLASH_READ32(sc,
   12890 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12891 			else
   12892 				hsfsts = ICH8_FLASH_READ16(sc,
   12893 				    ICH_FLASH_HSFSTS);
   12894 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12895 				error = 0;
   12896 				break;
   12897 			}
   12898 			delay(1);
   12899 		}
   12900 		if (error == 0) {
   12901 			/*
			 * The previous cycle ended within the timeout; now
			 * set the Flash Cycle Done.
   12904 			 */
   12905 			hsfsts |= HSFSTS_DONE;
   12906 			if (sc->sc_type >= WM_T_PCH_SPT)
   12907 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12908 				    hsfsts & 0xffffUL);
   12909 			else
   12910 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12911 				    hsfsts);
   12912 		}
   12913 	}
   12914 	return error;
   12915 }
   12916 
   12917 /******************************************************************************
   12918  * This function starts a flash cycle and waits for its completion
   12919  *
   12920  * sc - The pointer to the hw structure
   12921  ****************************************************************************/
   12922 static int32_t
   12923 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12924 {
   12925 	uint16_t hsflctl;
   12926 	uint16_t hsfsts;
   12927 	int32_t error = 1;
   12928 	uint32_t i = 0;
   12929 
   12930 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12931 	if (sc->sc_type >= WM_T_PCH_SPT)
   12932 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12933 	else
   12934 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12935 	hsflctl |= HSFCTL_GO;
   12936 	if (sc->sc_type >= WM_T_PCH_SPT)
   12937 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12938 		    (uint32_t)hsflctl << 16);
   12939 	else
   12940 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12941 
	/* Wait until the FDONE bit is set; the timeout is in 1 us steps */
   12943 	do {
   12944 		if (sc->sc_type >= WM_T_PCH_SPT)
   12945 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12946 			    & 0xffffUL;
   12947 		else
   12948 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12949 		if (hsfsts & HSFSTS_DONE)
   12950 			break;
   12951 		delay(1);
   12952 		i++;
   12953 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12955 		error = 0;
   12956 
   12957 	return error;
   12958 }
   12959 
   12960 /******************************************************************************
   12961  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12962  *
   12963  * sc - The pointer to the hw structure
   12964  * index - The index of the byte or word to read.
   12965  * size - Size of data to read, 1=byte 2=word, 4=dword
   12966  * data - Pointer to the word to store the value read.
   12967  *****************************************************************************/
   12968 static int32_t
   12969 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12970     uint32_t size, uint32_t *data)
   12971 {
   12972 	uint16_t hsfsts;
   12973 	uint16_t hsflctl;
   12974 	uint32_t flash_linear_address;
   12975 	uint32_t flash_data = 0;
   12976 	int32_t error = 1;
   12977 	int32_t count = 0;
   12978 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12981 		return error;
   12982 
   12983 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12984 	    sc->sc_ich8_flash_base;
   12985 
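	/*
	 * Try the read up to ICH_FLASH_CYCLE_REPEAT_COUNT times.  Each
	 * attempt re-initializes the flash cycle state before re-arming
	 * the cycle GO bit.
	 */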
   12986 	do {
   12987 		delay(1);
   12988 		/* Steps */
   12989 		error = wm_ich8_cycle_init(sc);
   12990 		if (error)
   12991 			break;
   12992 
   12993 		if (sc->sc_type >= WM_T_PCH_SPT)
   12994 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12995 			    >> 16;
   12996 		else
   12997 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field encodes the size minus one. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
   13001 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13002 		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * On SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is
			 * supported.
			 */
   13007 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13008 			    (uint32_t)hsflctl << 16);
   13009 		} else
   13010 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13011 
   13012 		/*
   13013 		 * Write the last 24 bits of index into Flash Linear address
   13014 		 * field in Flash Address
   13015 		 */
		/* TODO: check the index against the size of the flash */
   13017 
   13018 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13019 
   13020 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13021 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the Flash Data0 register,
		 * which returns the data least significant byte first.
		 */
   13028 		if (error == 0) {
   13029 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13030 			if (size == 1)
   13031 				*data = (uint8_t)(flash_data & 0x000000FF);
   13032 			else if (size == 2)
   13033 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13034 			else if (size == 4)
   13035 				*data = (uint32_t)flash_data;
   13036 			break;
   13037 		} else {
   13038 			/*
   13039 			 * If we've gotten here, then things are probably
   13040 			 * completely hosed, but if the error condition is
   13041 			 * detected, it won't hurt to give it another try...
   13042 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13043 			 */
   13044 			if (sc->sc_type >= WM_T_PCH_SPT)
   13045 				hsfsts = ICH8_FLASH_READ32(sc,
   13046 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13047 			else
   13048 				hsfsts = ICH8_FLASH_READ16(sc,
   13049 				    ICH_FLASH_HSFSTS);
   13050 
   13051 			if (hsfsts & HSFSTS_ERR) {
   13052 				/* Repeat for some time before giving up. */
   13053 				continue;
   13054 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13055 				break;
   13056 		}
   13057 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13058 
   13059 	return error;
   13060 }
   13061 
   13062 /******************************************************************************
   13063  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13064  *
   13065  * sc - pointer to wm_hw structure
   13066  * index - The index of the byte to read.
   13067  * data - Pointer to a byte to store the value read.
   13068  *****************************************************************************/
   13069 static int32_t
   13070 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13071 {
   13072 	int32_t status;
   13073 	uint32_t word = 0;
   13074 
   13075 	status = wm_read_ich8_data(sc, index, 1, &word);
   13076 	if (status == 0)
   13077 		*data = (uint8_t)word;
   13078 	else
   13079 		*data = 0;
   13080 
   13081 	return status;
   13082 }
   13083 
   13084 /******************************************************************************
   13085  * Reads a word from the NVM using the ICH8 flash access registers.
   13086  *
   13087  * sc - pointer to wm_hw structure
   13088  * index - The starting byte index of the word to read.
   13089  * data - Pointer to a word to store the value read.
   13090  *****************************************************************************/
   13091 static int32_t
   13092 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13093 {
   13094 	int32_t status;
   13095 	uint32_t word = 0;
   13096 
   13097 	status = wm_read_ich8_data(sc, index, 2, &word);
   13098 	if (status == 0)
   13099 		*data = (uint16_t)word;
   13100 	else
   13101 		*data = 0;
   13102 
   13103 	return status;
   13104 }
   13105 
   13106 /******************************************************************************
   13107  * Reads a dword from the NVM using the ICH8 flash access registers.
   13108  *
   13109  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13112  *****************************************************************************/
   13113 static int32_t
   13114 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13115 {
   13116 	int32_t status;
   13117 
   13118 	status = wm_read_ich8_data(sc, index, 4, data);
   13119 	return status;
   13120 }
   13121 
   13122 /******************************************************************************
   13123  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13124  * register.
   13125  *
   13126  * sc - Struct containing variables accessed by shared code
   13127  * offset - offset of word in the EEPROM to read
   13128  * data - word read from the EEPROM
   13129  * words - number of words to read
   13130  *****************************************************************************/
   13131 static int
   13132 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13133 {
   13134 	int32_t	 rv = 0;
   13135 	uint32_t flash_bank = 0;
   13136 	uint32_t act_offset = 0;
   13137 	uint32_t bank_offset = 0;
   13138 	uint16_t word = 0;
   13139 	uint16_t i = 0;
   13140 
   13141 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13142 		device_xname(sc->sc_dev), __func__));
   13143 
   13144 	if (sc->nvm.acquire(sc) != 0)
   13145 		return -1;
   13146 
   13147 	/*
   13148 	 * We need to know which is the valid flash bank.  In the event
   13149 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13150 	 * managing flash_bank. So it cannot be trusted and needs
   13151 	 * to be updated with each read.
   13152 	 */
   13153 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13154 	if (rv) {
   13155 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13156 			device_xname(sc->sc_dev)));
   13157 		flash_bank = 0;
   13158 	}
   13159 
	/*
	 * If we're on bank 1, skip past bank 0; the bank size is kept in
	 * words, hence the * 2 for a byte offset.
	 */
   13164 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13165 
   13166 	for (i = 0; i < words; i++) {
   13167 		/* The NVM part needs a byte offset, hence * 2 */
   13168 		act_offset = bank_offset + ((offset + i) * 2);
   13169 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13170 		if (rv) {
   13171 			aprint_error_dev(sc->sc_dev,
   13172 			    "%s: failed to read NVM\n", __func__);
   13173 			break;
   13174 		}
   13175 		data[i] = word;
   13176 	}
   13177 
   13178 	sc->nvm.release(sc);
   13179 	return rv;
   13180 }
   13181 
   13182 /******************************************************************************
   13183  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13184  * register.
   13185  *
   13186  * sc - Struct containing variables accessed by shared code
   13187  * offset - offset of word in the EEPROM to read
   13188  * data - word read from the EEPROM
   13189  * words - number of words to read
   13190  *****************************************************************************/
   13191 static int
   13192 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13193 {
   13194 	int32_t	 rv = 0;
   13195 	uint32_t flash_bank = 0;
   13196 	uint32_t act_offset = 0;
   13197 	uint32_t bank_offset = 0;
   13198 	uint32_t dword = 0;
   13199 	uint16_t i = 0;
   13200 
   13201 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13202 		device_xname(sc->sc_dev), __func__));
   13203 
   13204 	if (sc->nvm.acquire(sc) != 0)
   13205 		return -1;
   13206 
   13207 	/*
   13208 	 * We need to know which is the valid flash bank.  In the event
   13209 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13210 	 * managing flash_bank. So it cannot be trusted and needs
   13211 	 * to be updated with each read.
   13212 	 */
   13213 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13214 	if (rv) {
   13215 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13216 			device_xname(sc->sc_dev)));
   13217 		flash_bank = 0;
   13218 	}
   13219 
	/*
	 * If we're on bank 1, skip past bank 0; the bank size is kept in
	 * words, hence the * 2 for a byte offset.
	 */
   13224 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13225 
   13226 	for (i = 0; i < words; i++) {
   13227 		/* The NVM part needs a byte offset, hence * 2 */
   13228 		act_offset = bank_offset + ((offset + i) * 2);
   13229 		/* but we must read dword aligned, so mask ... */
   13230 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13231 		if (rv) {
   13232 			aprint_error_dev(sc->sc_dev,
   13233 			    "%s: failed to read NVM\n", __func__);
   13234 			break;
   13235 		}
   13236 		/* ... and pick out low or high word */
   13237 		if ((act_offset & 0x2) == 0)
   13238 			data[i] = (uint16_t)(dword & 0xFFFF);
   13239 		else
   13240 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13241 	}
   13242 
   13243 	sc->nvm.release(sc);
   13244 	return rv;
   13245 }
   13246 
   13247 /* iNVM */
   13248 
   13249 static int
   13250 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13251 {
	int32_t	 rv = -1;	/* Fail unless the requested word is found */
   13253 	uint32_t invm_dword;
   13254 	uint16_t i;
   13255 	uint8_t record_type, word_address;
   13256 
   13257 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13258 		device_xname(sc->sc_dev), __func__));
   13259 
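	/*
	 * The iNVM is a flat array of dwords.  Each dword begins a record
	 * whose type is encoded in the dword itself: CSR autoload and RSA
	 * key records are skipped by advancing the index past their
	 * payload, while word autoload records carry the (address, data)
	 * pairs we're looking for.
	 */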
   13260 	for (i = 0; i < INVM_SIZE; i++) {
   13261 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13262 		/* Get record type */
   13263 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13264 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13265 			break;
   13266 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13267 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13268 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13269 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13270 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13271 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13272 			if (word_address == address) {
   13273 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13274 				rv = 0;
   13275 				break;
   13276 			}
   13277 		}
   13278 	}
   13279 
   13280 	return rv;
   13281 }
   13282 
   13283 static int
   13284 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13285 {
   13286 	int rv = 0;
   13287 	int i;
   13288 
   13289 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13290 		device_xname(sc->sc_dev), __func__));
   13291 
   13292 	if (sc->nvm.acquire(sc) != 0)
   13293 		return -1;
   13294 
   13295 	for (i = 0; i < words; i++) {
   13296 		switch (offset + i) {
   13297 		case NVM_OFF_MACADDR:
   13298 		case NVM_OFF_MACADDR1:
   13299 		case NVM_OFF_MACADDR2:
   13300 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13301 			if (rv != 0) {
   13302 				data[i] = 0xffff;
   13303 				rv = -1;
   13304 			}
   13305 			break;
   13306 		case NVM_OFF_CFG2:
   13307 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13308 			if (rv != 0) {
   13309 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13310 				rv = 0;
   13311 			}
   13312 			break;
   13313 		case NVM_OFF_CFG4:
   13314 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13315 			if (rv != 0) {
   13316 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13317 				rv = 0;
   13318 			}
   13319 			break;
   13320 		case NVM_OFF_LED_1_CFG:
   13321 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13322 			if (rv != 0) {
   13323 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13324 				rv = 0;
   13325 			}
   13326 			break;
   13327 		case NVM_OFF_LED_0_2_CFG:
   13328 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13329 			if (rv != 0) {
   13330 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13331 				rv = 0;
   13332 			}
   13333 			break;
   13334 		case NVM_OFF_ID_LED_SETTINGS:
   13335 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13336 			if (rv != 0) {
   13337 				*data = ID_LED_RESERVED_FFFF;
   13338 				rv = 0;
   13339 			}
   13340 			break;
   13341 		default:
   13342 			DPRINTF(WM_DEBUG_NVM,
   13343 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13344 			*data = NVM_RESERVED_WORD;
   13345 			break;
   13346 		}
   13347 	}
   13348 
   13349 	sc->nvm.release(sc);
   13350 	return rv;
   13351 }
   13352 
   13353 /* Lock, detecting NVM type, validate checksum, version and read */
   13354 
   13355 static int
   13356 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13357 {
   13358 	uint32_t eecd = 0;
   13359 
   13360 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13361 	    || sc->sc_type == WM_T_82583) {
   13362 		eecd = CSR_READ(sc, WMREG_EECD);
   13363 
   13364 		/* Isolate bits 15 & 16 */
   13365 		eecd = ((eecd >> 15) & 0x03);
   13366 
   13367 		/* If both bits are set, device is Flash type */
   13368 		if (eecd == 0x03)
   13369 			return 0;
   13370 	}
   13371 	return 1;
   13372 }
   13373 
   13374 static int
   13375 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13376 {
   13377 	uint32_t eec;
   13378 
   13379 	eec = CSR_READ(sc, WMREG_EEC);
   13380 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13381 		return 1;
   13382 
   13383 	return 0;
   13384 }
   13385 
   13386 /*
   13387  * wm_nvm_validate_checksum
   13388  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal NVM_CHECKSUM.
   13390  */
   13391 static int
   13392 wm_nvm_validate_checksum(struct wm_softc *sc)
   13393 {
   13394 	uint16_t checksum;
   13395 	uint16_t eeprom_data;
   13396 #ifdef WM_DEBUG
   13397 	uint16_t csum_wordaddr, valid_checksum;
   13398 #endif
   13399 	int i;
   13400 
   13401 	checksum = 0;
   13402 
   13403 	/* Don't check for I211 */
   13404 	if (sc->sc_type == WM_T_I211)
   13405 		return 0;
   13406 
   13407 #ifdef WM_DEBUG
   13408 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13409 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13410 		csum_wordaddr = NVM_OFF_COMPAT;
   13411 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13412 	} else {
   13413 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13414 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13415 	}
   13416 
   13417 	/* Dump EEPROM image for debug */
   13418 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13419 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13420 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13421 		/* XXX PCH_SPT? */
   13422 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13423 		if ((eeprom_data & valid_checksum) == 0)
   13424 			DPRINTF(WM_DEBUG_NVM,
   13425 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13426 				device_xname(sc->sc_dev), eeprom_data,
   13427 				    valid_checksum));
   13428 	}
   13429 
   13430 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13431 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13432 		for (i = 0; i < NVM_SIZE; i++) {
   13433 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13434 				printf("XXXX ");
   13435 			else
   13436 				printf("%04hx ", eeprom_data);
   13437 			if (i % 8 == 7)
   13438 				printf("\n");
   13439 		}
   13440 	}
   13441 
   13442 #endif /* WM_DEBUG */
   13443 
   13444 	for (i = 0; i < NVM_SIZE; i++) {
   13445 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13446 			return 1;
   13447 		checksum += eeprom_data;
   13448 	}
   13449 
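	/*
	 * Note that a checksum mismatch is only reported (and only with
	 * WM_DEBUG); it is deliberately not treated as fatal.
	 */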
   13450 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13451 #ifdef WM_DEBUG
   13452 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13453 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13454 #endif
   13455 	}
   13456 
   13457 	return 0;
   13458 }
   13459 
   13460 static void
   13461 wm_nvm_version_invm(struct wm_softc *sc)
   13462 {
   13463 	uint32_t dword;
   13464 
   13465 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as documented.
   13468 	 * Perhaps it's not perfect though...
   13469 	 *
   13470 	 * Example:
   13471 	 *
   13472 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13473 	 */
   13474 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13475 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13476 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13477 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13478 }
   13479 
   13480 static void
   13481 wm_nvm_version(struct wm_softc *sc)
   13482 {
   13483 	uint16_t major, minor, build, patch;
   13484 	uint16_t uid0, uid1;
   13485 	uint16_t nvm_data;
   13486 	uint16_t off;
   13487 	bool check_version = false;
   13488 	bool check_optionrom = false;
   13489 	bool have_build = false;
   13490 	bool have_uid = true;
   13491 
   13492 	/*
   13493 	 * Version format:
   13494 	 *
   13495 	 * XYYZ
   13496 	 * X0YZ
   13497 	 * X0YY
   13498 	 *
   13499 	 * Example:
   13500 	 *
   13501 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13502 	 *	82571	0x50a6	5.10.6?
   13503 	 *	82572	0x506a	5.6.10?
   13504 	 *	82572EI	0x5069	5.6.9?
   13505 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13506 	 *		0x2013	2.1.3?
   13507 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13508 	 * ICH8+82567	0x0040	0.4.0?
   13509 	 * ICH9+82566	0x1040	1.4.0?
   13510 	 *ICH10+82567	0x0043	0.4.3?
   13511 	 *  PCH+82577	0x00c1	0.12.1?
   13512 	 * PCH2+82579	0x00d3	0.13.3?
   13513 	 *		0x00d4	0.13.4?
   13514 	 *  LPT+I218	0x0023	0.2.3?
   13515 	 *  SPT+I219	0x0084	0.8.4?
   13516 	 *  CNP+I219	0x0054	0.5.4?
   13517 	 */
   13518 
   13519 	/*
   13520 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13523 	 */
   13524 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13525 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13526 		have_uid = false;
   13527 
   13528 	switch (sc->sc_type) {
   13529 	case WM_T_82571:
   13530 	case WM_T_82572:
   13531 	case WM_T_82574:
   13532 	case WM_T_82583:
   13533 		check_version = true;
   13534 		check_optionrom = true;
   13535 		have_build = true;
   13536 		break;
   13537 	case WM_T_ICH8:
   13538 	case WM_T_ICH9:
   13539 	case WM_T_ICH10:
   13540 	case WM_T_PCH:
   13541 	case WM_T_PCH2:
   13542 	case WM_T_PCH_LPT:
   13543 	case WM_T_PCH_SPT:
   13544 	case WM_T_PCH_CNP:
   13545 		check_version = true;
   13546 		have_build = true;
   13547 		have_uid = false;
   13548 		break;
   13549 	case WM_T_82575:
   13550 	case WM_T_82576:
   13551 	case WM_T_82580:
   13552 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13553 			check_version = true;
   13554 		break;
   13555 	case WM_T_I211:
   13556 		wm_nvm_version_invm(sc);
   13557 		have_uid = false;
   13558 		goto printver;
   13559 	case WM_T_I210:
   13560 		if (!wm_nvm_flash_presence_i210(sc)) {
   13561 			wm_nvm_version_invm(sc);
   13562 			have_uid = false;
   13563 			goto printver;
   13564 		}
   13565 		/* FALLTHROUGH */
   13566 	case WM_T_I350:
   13567 	case WM_T_I354:
   13568 		check_version = true;
   13569 		check_optionrom = true;
   13570 		break;
   13571 	default:
   13572 		return;
   13573 	}
   13574 	if (check_version
   13575 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13576 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13577 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13578 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13579 			build = nvm_data & NVM_BUILD_MASK;
   13580 			have_build = true;
   13581 		} else
   13582 			minor = nvm_data & 0x00ff;
   13583 
		/* Convert the BCD-coded minor (e.g. 0x10) to decimal (10) */
   13585 		minor = (minor / 16) * 10 + (minor % 16);
   13586 		sc->sc_nvm_ver_major = major;
   13587 		sc->sc_nvm_ver_minor = minor;
   13588 
   13589 printver:
   13590 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13591 		    sc->sc_nvm_ver_minor);
   13592 		if (have_build) {
   13593 			sc->sc_nvm_ver_build = build;
   13594 			aprint_verbose(".%d", build);
   13595 		}
   13596 	}
   13597 
	/* Assume the Option ROM area is above NVM_SIZE */
   13599 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13600 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13601 		/* Option ROM Version */
   13602 		if ((off != 0x0000) && (off != 0xffff)) {
   13603 			int rv;
   13604 
   13605 			off += NVM_COMBO_VER_OFF;
   13606 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13607 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13608 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13609 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13610 				/* 16bits */
   13611 				major = uid0 >> 8;
   13612 				build = (uid0 << 8) | (uid1 >> 8);
   13613 				patch = uid1 & 0x00ff;
   13614 				aprint_verbose(", option ROM Version %d.%d.%d",
   13615 				    major, build, patch);
   13616 			}
   13617 		}
   13618 	}
   13619 
   13620 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13621 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13622 }
   13623 
   13624 /*
   13625  * wm_nvm_read:
   13626  *
   13627  *	Read data from the serial EEPROM.
   13628  */
   13629 static int
   13630 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13631 {
   13632 	int rv;
   13633 
   13634 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13635 		device_xname(sc->sc_dev), __func__));
   13636 
   13637 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13638 		return -1;
   13639 
   13640 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13641 
   13642 	return rv;
   13643 }
   13644 
   13645 /*
   13646  * Hardware semaphores.
 * Very complex...
   13648  */
   13649 
   13650 static int
   13651 wm_get_null(struct wm_softc *sc)
   13652 {
   13653 
   13654 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13655 		device_xname(sc->sc_dev), __func__));
   13656 	return 0;
   13657 }
   13658 
   13659 static void
   13660 wm_put_null(struct wm_softc *sc)
   13661 {
   13662 
   13663 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13664 		device_xname(sc->sc_dev), __func__));
   13665 	return;
   13666 }
   13667 
   13668 static int
   13669 wm_get_eecd(struct wm_softc *sc)
   13670 {
   13671 	uint32_t reg;
   13672 	int x;
   13673 
   13674 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13675 		device_xname(sc->sc_dev), __func__));
   13676 
   13677 	reg = CSR_READ(sc, WMREG_EECD);
   13678 
   13679 	/* Request EEPROM access. */
   13680 	reg |= EECD_EE_REQ;
   13681 	CSR_WRITE(sc, WMREG_EECD, reg);
   13682 
   13683 	/* ..and wait for it to be granted. */
   13684 	for (x = 0; x < 1000; x++) {
   13685 		reg = CSR_READ(sc, WMREG_EECD);
   13686 		if (reg & EECD_EE_GNT)
   13687 			break;
   13688 		delay(5);
   13689 	}
   13690 	if ((reg & EECD_EE_GNT) == 0) {
   13691 		aprint_error_dev(sc->sc_dev,
   13692 		    "could not acquire EEPROM GNT\n");
   13693 		reg &= ~EECD_EE_REQ;
   13694 		CSR_WRITE(sc, WMREG_EECD, reg);
   13695 		return -1;
   13696 	}
   13697 
   13698 	return 0;
   13699 }
   13700 
   13701 static void
   13702 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13703 {
   13704 
   13705 	*eecd |= EECD_SK;
   13706 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13707 	CSR_WRITE_FLUSH(sc);
   13708 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13709 		delay(1);
   13710 	else
   13711 		delay(50);
   13712 }
   13713 
   13714 static void
   13715 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13716 {
   13717 
   13718 	*eecd &= ~EECD_SK;
   13719 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13720 	CSR_WRITE_FLUSH(sc);
   13721 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13722 		delay(1);
   13723 	else
   13724 		delay(50);
   13725 }
   13726 
   13727 static void
   13728 wm_put_eecd(struct wm_softc *sc)
   13729 {
   13730 	uint32_t reg;
   13731 
   13732 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13733 		device_xname(sc->sc_dev), __func__));
   13734 
   13735 	/* Stop nvm */
   13736 	reg = CSR_READ(sc, WMREG_EECD);
   13737 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13738 		/* Pull CS high */
   13739 		reg |= EECD_CS;
   13740 		wm_nvm_eec_clock_lower(sc, &reg);
   13741 	} else {
   13742 		/* CS on Microwire is active-high */
   13743 		reg &= ~(EECD_CS | EECD_DI);
   13744 		CSR_WRITE(sc, WMREG_EECD, reg);
   13745 		wm_nvm_eec_clock_raise(sc, &reg);
   13746 		wm_nvm_eec_clock_lower(sc, &reg);
   13747 	}
   13748 
   13749 	reg = CSR_READ(sc, WMREG_EECD);
   13750 	reg &= ~EECD_EE_REQ;
   13751 	CSR_WRITE(sc, WMREG_EECD, reg);
   13752 
   13753 	return;
   13754 }
   13755 
   13756 /*
   13757  * Get hardware semaphore.
   13758  * Same as e1000_get_hw_semaphore_generic()
   13759  */
   13760 static int
   13761 wm_get_swsm_semaphore(struct wm_softc *sc)
   13762 {
   13763 	int32_t timeout;
   13764 	uint32_t swsm;
   13765 
   13766 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13767 		device_xname(sc->sc_dev), __func__));
   13768 	KASSERT(sc->sc_nvm_wordsize > 0);
   13769 
   13770 retry:
   13771 	/* Get the SW semaphore. */
   13772 	timeout = sc->sc_nvm_wordsize + 1;
   13773 	while (timeout) {
   13774 		swsm = CSR_READ(sc, WMREG_SWSM);
   13775 
   13776 		if ((swsm & SWSM_SMBI) == 0)
   13777 			break;
   13778 
   13779 		delay(50);
   13780 		timeout--;
   13781 	}
   13782 
   13783 	if (timeout == 0) {
   13784 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13785 			/*
   13786 			 * In rare circumstances, the SW semaphore may already
   13787 			 * be held unintentionally. Clear the semaphore once
   13788 			 * before giving up.
   13789 			 */
   13790 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13791 			wm_put_swsm_semaphore(sc);
   13792 			goto retry;
   13793 		}
   13794 		aprint_error_dev(sc->sc_dev,
   13795 		    "could not acquire SWSM SMBI\n");
   13796 		return 1;
   13797 	}
   13798 
   13799 	/* Get the FW semaphore. */
   13800 	timeout = sc->sc_nvm_wordsize + 1;
   13801 	while (timeout) {
   13802 		swsm = CSR_READ(sc, WMREG_SWSM);
   13803 		swsm |= SWSM_SWESMBI;
   13804 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13805 		/* If we managed to set the bit we got the semaphore. */
   13806 		swsm = CSR_READ(sc, WMREG_SWSM);
   13807 		if (swsm & SWSM_SWESMBI)
   13808 			break;
   13809 
   13810 		delay(50);
   13811 		timeout--;
   13812 	}
   13813 
   13814 	if (timeout == 0) {
   13815 		aprint_error_dev(sc->sc_dev,
   13816 		    "could not acquire SWSM SWESMBI\n");
   13817 		/* Release semaphores */
   13818 		wm_put_swsm_semaphore(sc);
   13819 		return 1;
   13820 	}
   13821 	return 0;
   13822 }
   13823 
   13824 /*
   13825  * Put hardware semaphore.
   13826  * Same as e1000_put_hw_semaphore_generic()
   13827  */
   13828 static void
   13829 wm_put_swsm_semaphore(struct wm_softc *sc)
   13830 {
   13831 	uint32_t swsm;
   13832 
   13833 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13834 		device_xname(sc->sc_dev), __func__));
   13835 
   13836 	swsm = CSR_READ(sc, WMREG_SWSM);
   13837 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13838 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13839 }
   13840 
   13841 /*
   13842  * Get SW/FW semaphore.
   13843  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13844  */
   13845 static int
   13846 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13847 {
   13848 	uint32_t swfw_sync;
   13849 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13850 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13851 	int timeout;
   13852 
   13853 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13854 		device_xname(sc->sc_dev), __func__));
   13855 
   13856 	if (sc->sc_type == WM_T_80003)
   13857 		timeout = 50;
   13858 	else
   13859 		timeout = 200;
   13860 
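	/*
	 * The SW_FW_SYNC register is itself protected by the SWSM
	 * semaphore: take SWSM, set our software bit if neither software
	 * nor firmware currently holds the resource, then drop SWSM.
	 */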
   13861 	while (timeout) {
   13862 		if (wm_get_swsm_semaphore(sc)) {
   13863 			aprint_error_dev(sc->sc_dev,
   13864 			    "%s: failed to get semaphore\n",
   13865 			    __func__);
   13866 			return 1;
   13867 		}
   13868 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13869 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13870 			swfw_sync |= swmask;
   13871 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13872 			wm_put_swsm_semaphore(sc);
   13873 			return 0;
   13874 		}
   13875 		wm_put_swsm_semaphore(sc);
   13876 		delay(5000);
   13877 		timeout--;
   13878 	}
   13879 	device_printf(sc->sc_dev,
   13880 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13881 	    mask, swfw_sync);
   13882 	return 1;
   13883 }
   13884 
   13885 static void
   13886 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13887 {
   13888 	uint32_t swfw_sync;
   13889 
   13890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13891 		device_xname(sc->sc_dev), __func__));
   13892 
   13893 	while (wm_get_swsm_semaphore(sc) != 0)
   13894 		continue;
   13895 
   13896 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13897 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13898 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13899 
   13900 	wm_put_swsm_semaphore(sc);
   13901 }
   13902 
   13903 static int
   13904 wm_get_nvm_80003(struct wm_softc *sc)
   13905 {
   13906 	int rv;
   13907 
   13908 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13909 		device_xname(sc->sc_dev), __func__));
   13910 
   13911 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13912 		aprint_error_dev(sc->sc_dev,
   13913 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13914 		return rv;
   13915 	}
   13916 
   13917 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13918 	    && (rv = wm_get_eecd(sc)) != 0) {
   13919 		aprint_error_dev(sc->sc_dev,
   13920 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13921 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13922 		return rv;
   13923 	}
   13924 
   13925 	return 0;
   13926 }
   13927 
   13928 static void
   13929 wm_put_nvm_80003(struct wm_softc *sc)
   13930 {
   13931 
   13932 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13933 		device_xname(sc->sc_dev), __func__));
   13934 
   13935 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13936 		wm_put_eecd(sc);
   13937 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13938 }
   13939 
   13940 static int
   13941 wm_get_nvm_82571(struct wm_softc *sc)
   13942 {
   13943 	int rv;
   13944 
   13945 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13946 		device_xname(sc->sc_dev), __func__));
   13947 
   13948 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13949 		return rv;
   13950 
   13951 	switch (sc->sc_type) {
   13952 	case WM_T_82573:
   13953 		break;
   13954 	default:
   13955 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13956 			rv = wm_get_eecd(sc);
   13957 		break;
   13958 	}
   13959 
   13960 	if (rv != 0) {
   13961 		aprint_error_dev(sc->sc_dev,
   13962 		    "%s: failed to get semaphore\n",
   13963 		    __func__);
   13964 		wm_put_swsm_semaphore(sc);
   13965 	}
   13966 
   13967 	return rv;
   13968 }
   13969 
   13970 static void
   13971 wm_put_nvm_82571(struct wm_softc *sc)
   13972 {
   13973 
   13974 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13975 		device_xname(sc->sc_dev), __func__));
   13976 
   13977 	switch (sc->sc_type) {
   13978 	case WM_T_82573:
   13979 		break;
   13980 	default:
   13981 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13982 			wm_put_eecd(sc);
   13983 		break;
   13984 	}
   13985 
   13986 	wm_put_swsm_semaphore(sc);
   13987 }
   13988 
   13989 static int
   13990 wm_get_phy_82575(struct wm_softc *sc)
   13991 {
   13992 
   13993 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13994 		device_xname(sc->sc_dev), __func__));
   13995 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13996 }
   13997 
   13998 static void
   13999 wm_put_phy_82575(struct wm_softc *sc)
   14000 {
   14001 
   14002 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14003 		device_xname(sc->sc_dev), __func__));
   14004 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14005 }
   14006 
   14007 static int
   14008 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14009 {
   14010 	uint32_t ext_ctrl;
	int timeout;
   14012 
   14013 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14014 		device_xname(sc->sc_dev), __func__));
   14015 
   14016 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14017 	for (timeout = 0; timeout < 200; timeout++) {
   14018 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14019 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14020 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14021 
   14022 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14023 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14024 			return 0;
   14025 		delay(5000);
   14026 	}
   14027 	device_printf(sc->sc_dev,
   14028 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14029 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14030 	return 1;
   14031 }
   14032 
   14033 static void
   14034 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14035 {
   14036 	uint32_t ext_ctrl;
   14037 
   14038 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14039 		device_xname(sc->sc_dev), __func__));
   14040 
   14041 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14042 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14043 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14044 
   14045 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14046 }
   14047 
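/*
 * Get the ICH8 software flag: wait for other agents to release MDIO
 * software ownership in EXTCNFCTR, then claim it and verify that the
 * bit stuck.
 */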
   14048 static int
   14049 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14050 {
   14051 	uint32_t ext_ctrl;
   14052 	int timeout;
   14053 
   14054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14055 		device_xname(sc->sc_dev), __func__));
   14056 	mutex_enter(sc->sc_ich_phymtx);
   14057 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14058 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14059 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14060 			break;
   14061 		delay(1000);
   14062 	}
   14063 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14064 		device_printf(sc->sc_dev,
   14065 		    "SW has already locked the resource\n");
   14066 		goto out;
   14067 	}
   14068 
   14069 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14070 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14071 	for (timeout = 0; timeout < 1000; timeout++) {
   14072 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14073 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14074 			break;
   14075 		delay(1000);
   14076 	}
   14077 	if (timeout >= 1000) {
   14078 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14079 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14080 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14081 		goto out;
   14082 	}
   14083 	return 0;
   14084 
   14085 out:
   14086 	mutex_exit(sc->sc_ich_phymtx);
   14087 	return 1;
   14088 }
   14089 
   14090 static void
   14091 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14092 {
   14093 	uint32_t ext_ctrl;
   14094 
   14095 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14096 		device_xname(sc->sc_dev), __func__));
   14097 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14098 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14099 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14100 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14101 	} else {
   14102 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14103 	}
   14104 
   14105 	mutex_exit(sc->sc_ich_phymtx);
   14106 }
   14107 
   14108 static int
   14109 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14110 {
   14111 
   14112 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14113 		device_xname(sc->sc_dev), __func__));
   14114 	mutex_enter(sc->sc_ich_nvmmtx);
   14115 
   14116 	return 0;
   14117 }
   14118 
   14119 static void
   14120 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14121 {
   14122 
   14123 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14124 		device_xname(sc->sc_dev), __func__));
   14125 	mutex_exit(sc->sc_ich_nvmmtx);
   14126 }
   14127 
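/*
 * On the 82573, the MDIO software ownership bit in EXTCNFCTR serves as
 * the hardware semaphore.
 */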
   14128 static int
   14129 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14130 {
   14131 	int i = 0;
   14132 	uint32_t reg;
   14133 
   14134 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14135 		device_xname(sc->sc_dev), __func__));
   14136 
   14137 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14138 	do {
   14139 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14140 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14141 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14142 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14143 			break;
   14144 		delay(2*1000);
   14145 		i++;
   14146 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14147 
   14148 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14149 		wm_put_hw_semaphore_82573(sc);
   14150 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14151 		    device_xname(sc->sc_dev));
   14152 		return -1;
   14153 	}
   14154 
   14155 	return 0;
   14156 }
   14157 
   14158 static void
   14159 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14160 {
   14161 	uint32_t reg;
   14162 
   14163 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14164 		device_xname(sc->sc_dev), __func__));
   14165 
   14166 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14167 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14168 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14169 }
   14170 
   14171 /*
   14172  * Management mode and power management related subroutines.
   14173  * BMC, AMT, suspend/resume and EEE.
   14174  */
   14175 
   14176 #ifdef WM_WOL
   14177 static int
   14178 wm_check_mng_mode(struct wm_softc *sc)
   14179 {
   14180 	int rv;
   14181 
   14182 	switch (sc->sc_type) {
   14183 	case WM_T_ICH8:
   14184 	case WM_T_ICH9:
   14185 	case WM_T_ICH10:
   14186 	case WM_T_PCH:
   14187 	case WM_T_PCH2:
   14188 	case WM_T_PCH_LPT:
   14189 	case WM_T_PCH_SPT:
   14190 	case WM_T_PCH_CNP:
   14191 		rv = wm_check_mng_mode_ich8lan(sc);
   14192 		break;
   14193 	case WM_T_82574:
   14194 	case WM_T_82583:
   14195 		rv = wm_check_mng_mode_82574(sc);
   14196 		break;
   14197 	case WM_T_82571:
   14198 	case WM_T_82572:
   14199 	case WM_T_82573:
   14200 	case WM_T_80003:
   14201 		rv = wm_check_mng_mode_generic(sc);
   14202 		break;
   14203 	default:
		/* Nothing to do */
   14205 		rv = 0;
   14206 		break;
   14207 	}
   14208 
   14209 	return rv;
   14210 }
   14211 
   14212 static int
   14213 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14214 {
   14215 	uint32_t fwsm;
   14216 
   14217 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14218 
   14219 	if (((fwsm & FWSM_FW_VALID) != 0)
   14220 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14221 		return 1;
   14222 
   14223 	return 0;
   14224 }
   14225 
   14226 static int
   14227 wm_check_mng_mode_82574(struct wm_softc *sc)
   14228 {
   14229 	uint16_t data;
   14230 
   14231 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14232 
   14233 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14234 		return 1;
   14235 
   14236 	return 0;
   14237 }
   14238 
   14239 static int
   14240 wm_check_mng_mode_generic(struct wm_softc *sc)
   14241 {
   14242 	uint32_t fwsm;
   14243 
   14244 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14245 
   14246 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14247 		return 1;
   14248 
   14249 	return 0;
   14250 }
   14251 #endif /* WM_WOL */
   14252 
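/*
 * Return 1 if manageability pass through (forwarding management packets
 * to the host) is enabled, otherwise 0.
 */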
   14253 static int
   14254 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14255 {
   14256 	uint32_t manc, fwsm, factps;
   14257 
   14258 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14259 		return 0;
   14260 
   14261 	manc = CSR_READ(sc, WMREG_MANC);
   14262 
   14263 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14264 		device_xname(sc->sc_dev), manc));
   14265 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14266 		return 0;
   14267 
   14268 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14269 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14270 		factps = CSR_READ(sc, WMREG_FACTPS);
   14271 		if (((factps & FACTPS_MNGCG) == 0)
   14272 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14273 			return 1;
   14274 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14275 		uint16_t data;
   14276 
   14277 		factps = CSR_READ(sc, WMREG_FACTPS);
   14278 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14279 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14280 			device_xname(sc->sc_dev), factps, data));
   14281 		if (((factps & FACTPS_MNGCG) == 0)
   14282 		    && ((data & NVM_CFG2_MNGM_MASK)
   14283 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14284 			return 1;
   14285 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14286 	    && ((manc & MANC_ASF_EN) == 0))
   14287 		return 1;
   14288 
   14289 	return 0;
   14290 }
   14291 
   14292 static bool
   14293 wm_phy_resetisblocked(struct wm_softc *sc)
   14294 {
   14295 	bool blocked = false;
   14296 	uint32_t reg;
   14297 	int i = 0;
   14298 
   14299 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14300 		device_xname(sc->sc_dev), __func__));
   14301 
   14302 	switch (sc->sc_type) {
   14303 	case WM_T_ICH8:
   14304 	case WM_T_ICH9:
   14305 	case WM_T_ICH10:
   14306 	case WM_T_PCH:
   14307 	case WM_T_PCH2:
   14308 	case WM_T_PCH_LPT:
   14309 	case WM_T_PCH_SPT:
   14310 	case WM_T_PCH_CNP:
   14311 		do {
   14312 			reg = CSR_READ(sc, WMREG_FWSM);
   14313 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14314 				blocked = true;
   14315 				delay(10*1000);
   14316 				continue;
   14317 			}
   14318 			blocked = false;
   14319 		} while (blocked && (i++ < 30));
   14320 		return blocked;
   14321 		break;
   14322 	case WM_T_82571:
   14323 	case WM_T_82572:
   14324 	case WM_T_82573:
   14325 	case WM_T_82574:
   14326 	case WM_T_82583:
   14327 	case WM_T_80003:
   14328 		reg = CSR_READ(sc, WMREG_MANC);
   14329 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14330 			return true;
   14331 		else
   14332 			return false;
   14333 		break;
   14334 	default:
   14335 		/* No problem */
   14336 		break;
   14337 	}
   14338 
   14339 	return false;
   14340 }
   14341 
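/*
 * Setting DRV_LOAD tells the firmware that a host driver has taken over
 * the device; wm_release_hw_control() clears it to hand control back.
 */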
   14342 static void
   14343 wm_get_hw_control(struct wm_softc *sc)
   14344 {
   14345 	uint32_t reg;
   14346 
   14347 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14348 		device_xname(sc->sc_dev), __func__));
   14349 
   14350 	if (sc->sc_type == WM_T_82573) {
   14351 		reg = CSR_READ(sc, WMREG_SWSM);
   14352 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14353 	} else if (sc->sc_type >= WM_T_82571) {
   14354 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14355 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14356 	}
   14357 }
   14358 
   14359 static void
   14360 wm_release_hw_control(struct wm_softc *sc)
   14361 {
   14362 	uint32_t reg;
   14363 
   14364 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14365 		device_xname(sc->sc_dev), __func__));
   14366 
   14367 	if (sc->sc_type == WM_T_82573) {
   14368 		reg = CSR_READ(sc, WMREG_SWSM);
   14369 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14370 	} else if (sc->sc_type >= WM_T_82571) {
   14371 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14372 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14373 	}
   14374 }
   14375 
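/*
 * Gate or ungate automatic PHY configuration by hardware; only PCH2
 * (82579) and newer have this gate bit.
 */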
   14376 static void
   14377 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14378 {
   14379 	uint32_t reg;
   14380 
   14381 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14382 		device_xname(sc->sc_dev), __func__));
   14383 
   14384 	if (sc->sc_type < WM_T_PCH2)
   14385 		return;
   14386 
   14387 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14388 
   14389 	if (gate)
   14390 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14391 	else
   14392 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14393 
   14394 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14395 }
   14396 
   14397 static int
   14398 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14399 {
   14400 	uint32_t fwsm, reg;
   14401 	int rv = 0;
   14402 
   14403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14404 		device_xname(sc->sc_dev), __func__));
   14405 
   14406 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14407 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14408 
   14409 	/* Disable ULP */
   14410 	wm_ulp_disable(sc);
   14411 
   14412 	/* Acquire PHY semaphore */
   14413 	rv = sc->phy.acquire(sc);
   14414 	if (rv != 0) {
   14415 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14416 		device_xname(sc->sc_dev), __func__));
   14417 		return -1;
   14418 	}
   14419 
   14420 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14421 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14422 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14423 	 */
   14424 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14425 	switch (sc->sc_type) {
   14426 	case WM_T_PCH_LPT:
   14427 	case WM_T_PCH_SPT:
   14428 	case WM_T_PCH_CNP:
   14429 		if (wm_phy_is_accessible_pchlan(sc))
   14430 			break;
   14431 
   14432 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14433 		 * forcing MAC to SMBus mode first.
   14434 		 */
   14435 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14436 		reg |= CTRL_EXT_FORCE_SMBUS;
   14437 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14438 #if 0
   14439 		/* XXX Isn't this required??? */
   14440 		CSR_WRITE_FLUSH(sc);
   14441 #endif
   14442 		/* Wait 50 milliseconds for MAC to finish any retries
   14443 		 * that it might be trying to perform from previous
   14444 		 * attempts to acknowledge any phy read requests.
   14445 		 */
   14446 		delay(50 * 1000);
   14447 		/* FALLTHROUGH */
   14448 	case WM_T_PCH2:
   14449 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14450 			break;
   14451 		/* FALLTHROUGH */
   14452 	case WM_T_PCH:
   14453 		if (sc->sc_type == WM_T_PCH)
   14454 			if ((fwsm & FWSM_FW_VALID) != 0)
   14455 				break;
   14456 
   14457 		if (wm_phy_resetisblocked(sc) == true) {
   14458 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14459 			break;
   14460 		}
   14461 
   14462 		/* Toggle LANPHYPC Value bit */
   14463 		wm_toggle_lanphypc_pch_lpt(sc);
   14464 
   14465 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14466 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14467 				break;
   14468 
   14469 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14470 			 * so ensure that the MAC is also out of SMBus mode
   14471 			 */
   14472 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14473 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14474 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14475 
   14476 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14477 				break;
   14478 			rv = -1;
   14479 		}
   14480 		break;
   14481 	default:
   14482 		break;
   14483 	}
   14484 
   14485 	/* Release semaphore */
   14486 	sc->phy.release(sc);
   14487 
   14488 	if (rv == 0) {
   14489 		/* Check to see if able to reset PHY.  Print error if not */
   14490 		if (wm_phy_resetisblocked(sc)) {
   14491 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14492 			goto out;
   14493 		}
   14494 
   14495 		/* Reset the PHY before any access to it.  Doing so, ensures
   14496 		 * that the PHY is in a known good state before we read/write
   14497 		 * PHY registers.  The generic reset is sufficient here,
   14498 		 * because we haven't determined the PHY type yet.
   14499 		 */
   14500 		if (wm_reset_phy(sc) != 0)
   14501 			goto out;
   14502 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * report that its reset is blocked, as this is the condition
		 * the PHY is in.
		 */
   14509 		if (wm_phy_resetisblocked(sc))
   14510 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14511 	}
   14512 
   14513 out:
   14514 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14515 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14516 		delay(10*1000);
   14517 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14518 	}
   14519 
	return rv;
   14521 }
   14522 
   14523 static void
   14524 wm_init_manageability(struct wm_softc *sc)
   14525 {
   14526 
   14527 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14528 		device_xname(sc->sc_dev), __func__));
   14529 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14530 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14531 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14532 
   14533 		/* Disable hardware interception of ARP */
   14534 		manc &= ~MANC_ARP_EN;
   14535 
   14536 		/* Enable receiving management packets to the host */
   14537 		if (sc->sc_type >= WM_T_82571) {
   14538 			manc |= MANC_EN_MNG2HOST;
   14539 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14540 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14541 		}
   14542 
   14543 		CSR_WRITE(sc, WMREG_MANC, manc);
   14544 	}
   14545 }
   14546 
   14547 static void
   14548 wm_release_manageability(struct wm_softc *sc)
   14549 {
   14550 
   14551 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14552 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14553 
   14554 		manc |= MANC_ARP_EN;
   14555 		if (sc->sc_type >= WM_T_82571)
   14556 			manc &= ~MANC_EN_MNG2HOST;
   14557 
   14558 		CSR_WRITE(sc, WMREG_MANC, manc);
   14559 	}
   14560 }
   14561 
   14562 static void
   14563 wm_get_wakeup(struct wm_softc *sc)
   14564 {
   14565 
   14566 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14567 	switch (sc->sc_type) {
   14568 	case WM_T_82573:
   14569 	case WM_T_82583:
   14570 		sc->sc_flags |= WM_F_HAS_AMT;
   14571 		/* FALLTHROUGH */
   14572 	case WM_T_80003:
   14573 	case WM_T_82575:
   14574 	case WM_T_82576:
   14575 	case WM_T_82580:
   14576 	case WM_T_I350:
   14577 	case WM_T_I354:
   14578 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14579 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14580 		/* FALLTHROUGH */
   14581 	case WM_T_82541:
   14582 	case WM_T_82541_2:
   14583 	case WM_T_82547:
   14584 	case WM_T_82547_2:
   14585 	case WM_T_82571:
   14586 	case WM_T_82572:
   14587 	case WM_T_82574:
   14588 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14589 		break;
   14590 	case WM_T_ICH8:
   14591 	case WM_T_ICH9:
   14592 	case WM_T_ICH10:
   14593 	case WM_T_PCH:
   14594 	case WM_T_PCH2:
   14595 	case WM_T_PCH_LPT:
   14596 	case WM_T_PCH_SPT:
   14597 	case WM_T_PCH_CNP:
   14598 		sc->sc_flags |= WM_F_HAS_AMT;
   14599 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14600 		break;
   14601 	default:
   14602 		break;
   14603 	}
   14604 
   14605 	/* 1: HAS_MANAGE */
   14606 	if (wm_enable_mng_pass_thru(sc) != 0)
   14607 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14608 
	/*
	 * Note that the WOL flag is set only after the EEPROM handling
	 * has been set up.
	 */
   14613 }
   14614 
   14615 /*
   14616  * Unconfigure Ultra Low Power mode.
   14617  * Only for I217 and newer (see below).
   14618  */
   14619 static int
   14620 wm_ulp_disable(struct wm_softc *sc)
   14621 {
   14622 	uint32_t reg;
   14623 	uint16_t phyreg;
   14624 	int i = 0, rv = 0;
   14625 
   14626 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14627 		device_xname(sc->sc_dev), __func__));
   14628 	/* Exclude old devices */
   14629 	if ((sc->sc_type < WM_T_PCH_LPT)
   14630 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14631 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14632 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14633 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14634 		return 0;
   14635 
   14636 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14637 		/* Request ME un-configure ULP mode in the PHY */
   14638 		reg = CSR_READ(sc, WMREG_H2ME);
   14639 		reg &= ~H2ME_ULP;
   14640 		reg |= H2ME_ENFORCE_SETTINGS;
   14641 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14642 
   14643 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14644 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14645 			if (i++ == 30) {
   14646 				device_printf(sc->sc_dev, "%s timed out\n",
   14647 				    __func__);
   14648 				return -1;
   14649 			}
   14650 			delay(10 * 1000);
   14651 		}
   14652 		reg = CSR_READ(sc, WMREG_H2ME);
   14653 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14654 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14655 
   14656 		return 0;
   14657 	}
   14658 
   14659 	/* Acquire semaphore */
   14660 	rv = sc->phy.acquire(sc);
   14661 	if (rv != 0) {
   14662 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14663 		device_xname(sc->sc_dev), __func__));
   14664 		return -1;
   14665 	}
   14666 
   14667 	/* Toggle LANPHYPC */
   14668 	wm_toggle_lanphypc_pch_lpt(sc);
   14669 
   14670 	/* Unforce SMBus mode in PHY */
   14671 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14672 	if (rv != 0) {
   14673 		uint32_t reg2;
   14674 
   14675 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14676 			__func__);
   14677 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14678 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14679 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14680 		delay(50 * 1000);
   14681 
   14682 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14683 		    &phyreg);
   14684 		if (rv != 0)
   14685 			goto release;
   14686 	}
   14687 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14688 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14689 
   14690 	/* Unforce SMBus mode in MAC */
   14691 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14692 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14693 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14694 
   14695 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14696 	if (rv != 0)
   14697 		goto release;
   14698 	phyreg |= HV_PM_CTRL_K1_ENA;
   14699 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14700 
   14701 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14702 		&phyreg);
   14703 	if (rv != 0)
   14704 		goto release;
   14705 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14706 	    | I218_ULP_CONFIG1_STICKY_ULP
   14707 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14708 	    | I218_ULP_CONFIG1_WOL_HOST
   14709 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14710 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14711 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14712 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14713 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
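          	/* Commit the ULP changes by starting auto ULP configuration */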
   14714 	phyreg |= I218_ULP_CONFIG1_START;
   14715 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14716 
   14717 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14718 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14719 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14720 
   14721 release:
   14722 	/* Release semaphore */
   14723 	sc->phy.release(sc);
   14724 	wm_gmii_reset(sc);
   14725 	delay(50 * 1000);
   14726 
   14727 	return rv;
   14728 }
   14729 
   14730 /* WOL in the newer chipset interfaces (pchlan) */
   14731 static int
   14732 wm_enable_phy_wakeup(struct wm_softc *sc)
   14733 {
   14734 	device_t dev = sc->sc_dev;
   14735 	uint32_t mreg, moff;
   14736 	uint16_t wuce, wuc, wufc, preg;
   14737 	int i, rv;
   14738 
   14739 	KASSERT(sc->sc_type >= WM_T_PCH);
   14740 
   14741 	/* Copy MAC RARs to PHY RARs */
   14742 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14743 
   14744 	/* Activate PHY wakeup */
   14745 	rv = sc->phy.acquire(sc);
   14746 	if (rv != 0) {
   14747 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14748 		    __func__);
   14749 		return rv;
   14750 	}
   14751 
   14752 	/*
   14753 	 * Enable access to PHY wakeup registers.
   14754 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14755 	 */
   14756 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14757 	if (rv != 0) {
   14758 		device_printf(dev,
   14759 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14760 		goto release;
   14761 	}
   14762 
   14763 	/* Copy MAC MTA to PHY MTA */
   14764 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14765 		uint16_t lo, hi;
   14766 
   14767 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14768 		lo = (uint16_t)(mreg & 0xffff);
   14769 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14770 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14771 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14772 	}
   14773 
   14774 	/* Configure PHY Rx Control register */
   14775 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14776 	mreg = CSR_READ(sc, WMREG_RCTL);
   14777 	if (mreg & RCTL_UPE)
   14778 		preg |= BM_RCTL_UPE;
   14779 	if (mreg & RCTL_MPE)
   14780 		preg |= BM_RCTL_MPE;
   14781 	preg &= ~(BM_RCTL_MO_MASK);
   14782 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14783 	if (moff != 0)
   14784 		preg |= moff << BM_RCTL_MO_SHIFT;
   14785 	if (mreg & RCTL_BAM)
   14786 		preg |= BM_RCTL_BAM;
   14787 	if (mreg & RCTL_PMCF)
   14788 		preg |= BM_RCTL_PMCF;
   14789 	mreg = CSR_READ(sc, WMREG_CTRL);
   14790 	if (mreg & CTRL_RFCE)
   14791 		preg |= BM_RCTL_RFCE;
   14792 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14793 
   14794 	wuc = WUC_APME | WUC_PME_EN;
   14795 	wufc = WUFC_MAG;
   14796 	/* Enable PHY wakeup in MAC register */
   14797 	CSR_WRITE(sc, WMREG_WUC,
   14798 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14799 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14800 
   14801 	/* Configure and enable PHY wakeup in PHY registers */
   14802 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14803 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14804 
   14805 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14806 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14807 
   14808 release:
   14809 	sc->phy.release(sc);
   14810 
    14811 	return rv;
   14812 }
   14813 
   14814 /* Power down workaround on D3 */
   14815 static void
   14816 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14817 {
   14818 	uint32_t reg;
   14819 	uint16_t phyreg;
   14820 	int i;
   14821 
   14822 	for (i = 0; i < 2; i++) {
   14823 		/* Disable link */
   14824 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14825 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14826 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14827 
   14828 		/*
   14829 		 * Call gig speed drop workaround on Gig disable before
   14830 		 * accessing any PHY registers
   14831 		 */
   14832 		if (sc->sc_type == WM_T_ICH8)
   14833 			wm_gig_downshift_workaround_ich8lan(sc);
   14834 
   14835 		/* Write VR power-down enable */
   14836 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14837 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14838 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14839 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14840 
   14841 		/* Read it back and test */
   14842 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14843 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14844 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14845 			break;
   14846 
   14847 		/* Issue PHY reset and repeat at most one more time */
   14848 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14849 	}
   14850 }
   14851 
   14852 /*
   14853  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14854  *  @sc: pointer to the HW structure
   14855  *
   14856  *  During S0 to Sx transition, it is possible the link remains at gig
   14857  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14858  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14859  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14860  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14861  *  needs to be written.
   14862  *  Parts that support (and are linked to a partner which support) EEE in
   14863  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14864  *  than 10Mbps w/o EEE.
   14865  */
   14866 static void
   14867 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14868 {
   14869 	device_t dev = sc->sc_dev;
   14870 	struct ethercom *ec = &sc->sc_ethercom;
   14871 	uint32_t phy_ctrl;
   14872 	int rv;
   14873 
   14874 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14875 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14876 
   14877 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14878 
   14879 	if (sc->sc_phytype == WMPHY_I217) {
   14880 		uint16_t devid = sc->sc_pcidevid;
   14881 
   14882 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14883 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14884 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14885 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14886 		    (sc->sc_type >= WM_T_PCH_SPT))
   14887 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14888 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14889 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14890 
   14891 		if (sc->phy.acquire(sc) != 0)
   14892 			goto out;
   14893 
   14894 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14895 			uint16_t eee_advert;
   14896 
   14897 			rv = wm_read_emi_reg_locked(dev,
   14898 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14899 			if (rv)
   14900 				goto release;
   14901 
   14902 			/*
   14903 			 * Disable LPLU if both link partners support 100BaseT
   14904 			 * EEE and 100Full is advertised on both ends of the
   14905 			 * link, and enable Auto Enable LPI since there will
   14906 			 * be no driver to enable LPI while in Sx.
   14907 			 */
   14908 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14909 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14910 				uint16_t anar, phy_reg;
   14911 
   14912 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14913 				    &anar);
   14914 				if (anar & ANAR_TX_FD) {
   14915 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14916 					    PHY_CTRL_NOND0A_LPLU);
   14917 
   14918 					/* Set Auto Enable LPI after link up */
   14919 					sc->phy.readreg_locked(dev, 2,
   14920 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14921 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14922 					sc->phy.writereg_locked(dev, 2,
   14923 					    I217_LPI_GPIO_CTRL, phy_reg);
   14924 				}
   14925 			}
   14926 		}
   14927 
   14928 		/*
   14929 		 * For i217 Intel Rapid Start Technology support,
   14930 		 * when the system is going into Sx and no manageability engine
   14931 		 * is present, the driver must configure proxy to reset only on
   14932 		 * power good.	LPI (Low Power Idle) state must also reset only
   14933 		 * on power good, as well as the MTA (Multicast table array).
   14934 		 * The SMBus release must also be disabled on LCD reset.
   14935 		 */
   14936 
   14937 		/*
   14938 		 * Enable MTA to reset for Intel Rapid Start Technology
   14939 		 * Support
   14940 		 */
   14941 
   14942 release:
   14943 		sc->phy.release(sc);
   14944 	}
   14945 out:
   14946 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14947 
   14948 	if (sc->sc_type == WM_T_ICH8)
   14949 		wm_gig_downshift_workaround_ich8lan(sc);
   14950 
   14951 	if (sc->sc_type >= WM_T_PCH) {
   14952 		wm_oem_bits_config_ich8lan(sc, false);
   14953 
   14954 		/* Reset PHY to activate OEM bits on 82577/8 */
   14955 		if (sc->sc_type == WM_T_PCH)
   14956 			wm_reset_phy(sc);
   14957 
   14958 		if (sc->phy.acquire(sc) != 0)
   14959 			return;
   14960 		wm_write_smbus_addr(sc);
   14961 		sc->phy.release(sc);
   14962 	}
   14963 }
   14964 
   14965 /*
   14966  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14967  *  @sc: pointer to the HW structure
   14968  *
   14969  *  During Sx to S0 transitions on non-managed devices or managed devices
   14970  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14971  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14972  *  the PHY.
   14973  *  On i217, setup Intel Rapid Start Technology.
   14974  */
   14975 static int
   14976 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14977 {
   14978 	device_t dev = sc->sc_dev;
   14979 	int rv;
   14980 
   14981 	if (sc->sc_type < WM_T_PCH2)
   14982 		return 0;
   14983 
   14984 	rv = wm_init_phy_workarounds_pchlan(sc);
   14985 	if (rv != 0)
   14986 		return -1;
   14987 
    14988 	/* For i217 Intel Rapid Start Technology support, when the system
    14989 	 * is transitioning from Sx and no manageability engine is present,
    14990 	 * configure SMBus to restore on reset, disable proxy, and enable
    14991 	 * the reset on MTA (Multicast table array).
    14992 	 */
   14993 	if (sc->sc_phytype == WMPHY_I217) {
   14994 		uint16_t phy_reg;
   14995 
   14996 		if (sc->phy.acquire(sc) != 0)
   14997 			return -1;
   14998 
   14999 		/* Clear Auto Enable LPI after link up */
   15000 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15001 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15002 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15003 
   15004 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15005 			/* Restore clear on SMB if no manageability engine
   15006 			 * is present
   15007 			 */
   15008 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15009 			    &phy_reg);
   15010 			if (rv != 0)
   15011 				goto release;
   15012 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15013 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15014 
   15015 			/* Disable Proxy */
   15016 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15017 		}
   15018 		/* Enable reset on MTA */
    15019 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15020 		if (rv != 0)
   15021 			goto release;
   15022 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15023 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15024 
   15025 release:
   15026 		sc->phy.release(sc);
   15027 		return rv;
   15028 	}
   15029 
   15030 	return 0;
   15031 }
   15032 
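          /*
           * Enable Wake On LAN if configured, applying the required suspend
           * workarounds, then request (or disable) PME in the PCI power
           * management registers.
           */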
   15033 static void
   15034 wm_enable_wakeup(struct wm_softc *sc)
   15035 {
   15036 	uint32_t reg, pmreg;
   15037 	pcireg_t pmode;
   15038 	int rv = 0;
   15039 
   15040 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15041 		device_xname(sc->sc_dev), __func__));
   15042 
   15043 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15044 	    &pmreg, NULL) == 0)
   15045 		return;
   15046 
   15047 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15048 		goto pme;
   15049 
   15050 	/* Advertise the wakeup capability */
   15051 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15052 	    | CTRL_SWDPIN(3));
   15053 
   15054 	/* Keep the laser running on fiber adapters */
   15055 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15056 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15057 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15058 		reg |= CTRL_EXT_SWDPIN(3);
   15059 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15060 	}
   15061 
   15062 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15063 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15064 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15065 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15066 		wm_suspend_workarounds_ich8lan(sc);
   15067 
   15068 #if 0	/* For the multicast packet */
   15069 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15070 	reg |= WUFC_MC;
   15071 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15072 #endif
   15073 
   15074 	if (sc->sc_type >= WM_T_PCH) {
   15075 		rv = wm_enable_phy_wakeup(sc);
   15076 		if (rv != 0)
   15077 			goto pme;
   15078 	} else {
   15079 		/* Enable wakeup by the MAC */
   15080 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15081 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15082 	}
   15083 
   15084 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15085 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15086 		|| (sc->sc_type == WM_T_PCH2))
   15087 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15088 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15089 
   15090 pme:
   15091 	/* Request PME */
   15092 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15093 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15094 		/* For WOL */
   15095 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15096 	} else {
   15097 		/* Disable WOL */
   15098 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15099 	}
   15100 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15101 }
   15102 
   15103 /* Disable ASPM L0s and/or L1 for workaround */
   15104 static void
   15105 wm_disable_aspm(struct wm_softc *sc)
   15106 {
   15107 	pcireg_t reg, mask = 0;
    15108 	const char *str = "";
   15109 
    15110 	/*
    15111 	 * Only for PCIe devices that have the PCIe capability in the
    15112 	 * PCI config space.
    15113 	 */
   15114 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15115 		return;
   15116 
   15117 	switch (sc->sc_type) {
   15118 	case WM_T_82571:
   15119 	case WM_T_82572:
   15120 		/*
   15121 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15122 		 * State Power management L1 State (ASPM L1).
   15123 		 */
   15124 		mask = PCIE_LCSR_ASPM_L1;
   15125 		str = "L1 is";
   15126 		break;
   15127 	case WM_T_82573:
   15128 	case WM_T_82574:
   15129 	case WM_T_82583:
   15130 		/*
   15131 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15132 		 *
    15133 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15134 		 * some chipsets.  The documents for the 82574 and 82583 say
    15135 		 * that disabling L0s with specific chipsets is sufficient,
    15136 		 * but we follow what the Intel em driver does.
   15137 		 *
   15138 		 * References:
   15139 		 * Errata 8 of the Specification Update of i82573.
   15140 		 * Errata 20 of the Specification Update of i82574.
   15141 		 * Errata 9 of the Specification Update of i82583.
   15142 		 */
   15143 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15144 		str = "L0s and L1 are";
   15145 		break;
   15146 	default:
   15147 		return;
   15148 	}
   15149 
   15150 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15151 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15152 	reg &= ~mask;
   15153 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15154 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15155 
   15156 	/* Print only in wm_attach() */
   15157 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15158 		aprint_verbose_dev(sc->sc_dev,
   15159 		    "ASPM %s disabled to workaround the errata.\n", str);
   15160 }
   15161 
   15162 /* LPLU */
   15163 
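          /* Disable LPLU (Low Power Link Up) in the D0 (full power) state */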
   15164 static void
   15165 wm_lplu_d0_disable(struct wm_softc *sc)
   15166 {
   15167 	struct mii_data *mii = &sc->sc_mii;
   15168 	uint32_t reg;
   15169 	uint16_t phyval;
   15170 
   15171 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15172 		device_xname(sc->sc_dev), __func__));
   15173 
   15174 	if (sc->sc_phytype == WMPHY_IFE)
   15175 		return;
   15176 
   15177 	switch (sc->sc_type) {
   15178 	case WM_T_82571:
   15179 	case WM_T_82572:
   15180 	case WM_T_82573:
   15181 	case WM_T_82575:
   15182 	case WM_T_82576:
   15183 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15184 		phyval &= ~PMR_D0_LPLU;
   15185 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15186 		break;
   15187 	case WM_T_82580:
   15188 	case WM_T_I350:
   15189 	case WM_T_I210:
   15190 	case WM_T_I211:
   15191 		reg = CSR_READ(sc, WMREG_PHPM);
   15192 		reg &= ~PHPM_D0A_LPLU;
   15193 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15194 		break;
   15195 	case WM_T_82574:
   15196 	case WM_T_82583:
   15197 	case WM_T_ICH8:
   15198 	case WM_T_ICH9:
   15199 	case WM_T_ICH10:
   15200 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15201 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15202 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15203 		CSR_WRITE_FLUSH(sc);
   15204 		break;
   15205 	case WM_T_PCH:
   15206 	case WM_T_PCH2:
   15207 	case WM_T_PCH_LPT:
   15208 	case WM_T_PCH_SPT:
   15209 	case WM_T_PCH_CNP:
   15210 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15211 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15212 		if (wm_phy_resetisblocked(sc) == false)
   15213 			phyval |= HV_OEM_BITS_ANEGNOW;
   15214 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15215 		break;
   15216 	default:
   15217 		break;
   15218 	}
   15219 }
   15220 
   15221 /* EEE */
   15222 
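          /* Enable or disable EEE per the ETHERCAP_EEE setting (I350/I210/I211) */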
   15223 static int
   15224 wm_set_eee_i350(struct wm_softc *sc)
   15225 {
   15226 	struct ethercom *ec = &sc->sc_ethercom;
   15227 	uint32_t ipcnfg, eeer;
   15228 	uint32_t ipcnfg_mask
   15229 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15230 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15231 
   15232 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15233 
   15234 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15235 	eeer = CSR_READ(sc, WMREG_EEER);
   15236 
   15237 	/* Enable or disable per user setting */
   15238 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15239 		ipcnfg |= ipcnfg_mask;
   15240 		eeer |= eeer_mask;
   15241 	} else {
   15242 		ipcnfg &= ~ipcnfg_mask;
   15243 		eeer &= ~eeer_mask;
   15244 	}
   15245 
   15246 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15247 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15248 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15249 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15250 
   15251 	return 0;
   15252 }
   15253 
   15254 static int
   15255 wm_set_eee_pchlan(struct wm_softc *sc)
   15256 {
   15257 	device_t dev = sc->sc_dev;
   15258 	struct ethercom *ec = &sc->sc_ethercom;
   15259 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15260 	int rv = 0;
   15261 
   15262 	switch (sc->sc_phytype) {
   15263 	case WMPHY_82579:
   15264 		lpa = I82579_EEE_LP_ABILITY;
   15265 		pcs_status = I82579_EEE_PCS_STATUS;
   15266 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15267 		break;
   15268 	case WMPHY_I217:
   15269 		lpa = I217_EEE_LP_ABILITY;
   15270 		pcs_status = I217_EEE_PCS_STATUS;
   15271 		adv_addr = I217_EEE_ADVERTISEMENT;
   15272 		break;
   15273 	default:
   15274 		return 0;
   15275 	}
   15276 
   15277 	if (sc->phy.acquire(sc)) {
   15278 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15279 		return 0;
   15280 	}
   15281 
   15282 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15283 	if (rv != 0)
   15284 		goto release;
   15285 
   15286 	/* Clear bits that enable EEE in various speeds */
   15287 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15288 
   15289 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15290 		/* Save off link partner's EEE ability */
   15291 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15292 		if (rv != 0)
   15293 			goto release;
   15294 
   15295 		/* Read EEE advertisement */
   15296 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15297 			goto release;
   15298 
   15299 		/*
   15300 		 * Enable EEE only for speeds in which the link partner is
   15301 		 * EEE capable and for which we advertise EEE.
   15302 		 */
   15303 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15304 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15305 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15306 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15307 			if ((data & ANLPAR_TX_FD) != 0)
   15308 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15309 			else {
   15310 				/*
   15311 				 * EEE is not supported in 100Half, so ignore
   15312 				 * partner's EEE in 100 ability if full-duplex
   15313 				 * is not advertised.
   15314 				 */
   15315 				sc->eee_lp_ability
   15316 				    &= ~AN_EEEADVERT_100_TX;
   15317 			}
   15318 		}
   15319 	}
   15320 
   15321 	if (sc->sc_phytype == WMPHY_82579) {
   15322 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15323 		if (rv != 0)
   15324 			goto release;
   15325 
   15326 		data &= ~I82579_LPI_PLL_SHUT_100;
   15327 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15328 	}
   15329 
   15330 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15331 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15332 		goto release;
   15333 
   15334 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15335 release:
   15336 	sc->phy.release(sc);
   15337 
   15338 	return rv;
   15339 }
   15340 
   15341 static int
   15342 wm_set_eee(struct wm_softc *sc)
   15343 {
   15344 	struct ethercom *ec = &sc->sc_ethercom;
   15345 
   15346 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15347 		return 0;
   15348 
   15349 	if (sc->sc_type == WM_T_I354) {
   15350 		/* I354 uses an external PHY */
   15351 		return 0; /* not yet */
   15352 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15353 		return wm_set_eee_i350(sc);
   15354 	else if (sc->sc_type >= WM_T_PCH2)
   15355 		return wm_set_eee_pchlan(sc);
   15356 
   15357 	return 0;
   15358 }
   15359 
   15360 /*
   15361  * Workarounds (mainly PHY related).
    15362  * Basically, the PHY workarounds live in the PHY drivers.
   15363  */
   15364 
   15365 /* Work-around for 82566 Kumeran PCS lock loss */
   15366 static int
   15367 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15368 {
   15369 	struct mii_data *mii = &sc->sc_mii;
   15370 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15371 	int i, reg, rv;
   15372 	uint16_t phyreg;
   15373 
   15374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15375 		device_xname(sc->sc_dev), __func__));
   15376 
   15377 	/* If the link is not up, do nothing */
   15378 	if ((status & STATUS_LU) == 0)
   15379 		return 0;
   15380 
   15381 	/* Nothing to do if the link is other than 1Gbps */
   15382 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15383 		return 0;
   15384 
   15385 	for (i = 0; i < 10; i++) {
   15386 		/* read twice */
   15387 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15388 		if (rv != 0)
   15389 			return rv;
   15390 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15391 		if (rv != 0)
   15392 			return rv;
   15393 
   15394 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15395 			goto out;	/* GOOD! */
   15396 
   15397 		/* Reset the PHY */
   15398 		wm_reset_phy(sc);
   15399 		delay(5*1000);
   15400 	}
   15401 
   15402 	/* Disable GigE link negotiation */
   15403 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15404 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15405 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15406 
   15407 	/*
   15408 	 * Call gig speed drop workaround on Gig disable before accessing
   15409 	 * any PHY registers.
   15410 	 */
   15411 	wm_gig_downshift_workaround_ich8lan(sc);
   15412 
   15413 out:
   15414 	return 0;
   15415 }
   15416 
   15417 /*
   15418  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15419  *  @sc: pointer to the HW structure
   15420  *
   15421  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15422  *  LPLU, Gig disable, MDIC PHY reset):
   15423  *    1) Set Kumeran Near-end loopback
   15424  *    2) Clear Kumeran Near-end loopback
   15425  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15426  */
   15427 static void
   15428 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15429 {
   15430 	uint16_t kmreg;
   15431 
   15432 	/* Only for igp3 */
   15433 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15434 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15435 			return;
   15436 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15437 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15438 			return;
   15439 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15440 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15441 	}
   15442 }
   15443 
   15444 /*
   15445  * Workaround for pch's PHYs
   15446  * XXX should be moved to new PHY driver?
   15447  */
   15448 static int
   15449 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15450 {
   15451 	device_t dev = sc->sc_dev;
   15452 	struct mii_data *mii = &sc->sc_mii;
   15453 	struct mii_softc *child;
   15454 	uint16_t phy_data, phyrev = 0;
   15455 	int phytype = sc->sc_phytype;
   15456 	int rv;
   15457 
   15458 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15459 		device_xname(dev), __func__));
   15460 	KASSERT(sc->sc_type == WM_T_PCH);
   15461 
   15462 	/* Set MDIO slow mode before any other MDIO access */
   15463 	if (phytype == WMPHY_82577)
   15464 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15465 			return rv;
   15466 
   15467 	child = LIST_FIRST(&mii->mii_phys);
   15468 	if (child != NULL)
   15469 		phyrev = child->mii_mpd_rev;
   15470 
    15471 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15472 	if ((child != NULL) &&
   15473 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15474 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15475 		/* Disable generation of early preamble (0x4431) */
   15476 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15477 		    &phy_data);
   15478 		if (rv != 0)
   15479 			return rv;
   15480 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15481 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15482 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15483 		    phy_data);
   15484 		if (rv != 0)
   15485 			return rv;
   15486 
   15487 		/* Preamble tuning for SSC */
   15488 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15489 		if (rv != 0)
   15490 			return rv;
   15491 	}
   15492 
   15493 	/* 82578 */
   15494 	if (phytype == WMPHY_82578) {
   15495 		/*
   15496 		 * Return registers to default by doing a soft reset then
   15497 		 * writing 0x3140 to the control register
   15498 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15499 		 */
   15500 		if ((child != NULL) && (phyrev < 2)) {
   15501 			PHY_RESET(child);
   15502 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15503 			if (rv != 0)
   15504 				return rv;
   15505 		}
   15506 	}
   15507 
   15508 	/* Select page 0 */
   15509 	if ((rv = sc->phy.acquire(sc)) != 0)
   15510 		return rv;
   15511 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15512 	sc->phy.release(sc);
   15513 	if (rv != 0)
   15514 		return rv;
   15515 
   15516 	/*
   15517 	 * Configure the K1 Si workaround during phy reset assuming there is
   15518 	 * link so that it disables K1 if link is in 1Gbps.
   15519 	 */
   15520 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15521 		return rv;
   15522 
   15523 	/* Workaround for link disconnects on a busy hub in half duplex */
   15524 	rv = sc->phy.acquire(sc);
   15525 	if (rv)
   15526 		return rv;
   15527 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15528 	if (rv)
   15529 		goto release;
   15530 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15531 	    phy_data & 0x00ff);
   15532 	if (rv)
   15533 		goto release;
   15534 
   15535 	/* Set MSE higher to enable link to stay up when noise is high */
   15536 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15537 release:
   15538 	sc->phy.release(sc);
   15539 
   15540 	return rv;
   15541 }
   15542 
   15543 /*
   15544  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15545  *  @sc:   pointer to the HW structure
   15546  */
   15547 static void
   15548 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15549 {
   15550 	device_t dev = sc->sc_dev;
   15551 	uint32_t mac_reg;
   15552 	uint16_t i, wuce;
   15553 	int count;
   15554 
   15555 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15556 		device_xname(sc->sc_dev), __func__));
   15557 
   15558 	if (sc->phy.acquire(sc) != 0)
   15559 		return;
   15560 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15561 		goto release;
   15562 
   15563 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15564 	count = wm_rar_count(sc);
   15565 	for (i = 0; i < count; i++) {
   15566 		uint16_t lo, hi;
   15567 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15568 		lo = (uint16_t)(mac_reg & 0xffff);
   15569 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15570 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15571 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15572 
   15573 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15574 		lo = (uint16_t)(mac_reg & 0xffff);
   15575 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15576 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15577 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15578 	}
   15579 
   15580 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15581 
   15582 release:
   15583 	sc->phy.release(sc);
   15584 }
   15585 
   15586 /*
   15587  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15588  *  done after every PHY reset.
   15589  */
   15590 static int
   15591 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15592 {
   15593 	device_t dev = sc->sc_dev;
   15594 	int rv;
   15595 
   15596 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15597 		device_xname(dev), __func__));
   15598 	KASSERT(sc->sc_type == WM_T_PCH2);
   15599 
   15600 	/* Set MDIO slow mode before any other MDIO access */
   15601 	rv = wm_set_mdio_slow_mode_hv(sc);
   15602 	if (rv != 0)
   15603 		return rv;
   15604 
   15605 	rv = sc->phy.acquire(sc);
   15606 	if (rv != 0)
   15607 		return rv;
   15608 	/* Set MSE higher to enable link to stay up when noise is high */
   15609 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15610 	if (rv != 0)
   15611 		goto release;
   15612 	/* Drop link after 5 times MSE threshold was reached */
   15613 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15614 release:
   15615 	sc->phy.release(sc);
   15616 
   15617 	return rv;
   15618 }
   15619 
   15620 /**
   15621  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15622  *  @link: link up bool flag
   15623  *
    15624  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
    15625  *  preventing further DMA write requests.  Work around the issue by disabling
    15626  *  the de-assertion of the clock request when in 1Gbps mode.
   15627  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15628  *  speeds in order to avoid Tx hangs.
   15629  **/
   15630 static int
   15631 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15632 {
   15633 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15634 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15635 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15636 	uint16_t phyreg;
   15637 
   15638 	if (link && (speed == STATUS_SPEED_1000)) {
    15639 		if (sc->phy.acquire(sc) != 0)
          			return -1;
   15640 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15641 		    &phyreg);
   15642 		if (rv != 0)
   15643 			goto release;
   15644 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15645 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15646 		if (rv != 0)
   15647 			goto release;
   15648 		delay(20);
   15649 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15650 
   15651 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15652 		    &phyreg);
   15653 release:
   15654 		sc->phy.release(sc);
   15655 		return rv;
   15656 	}
   15657 
   15658 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15659 
   15660 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15661 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15662 	    || !link
   15663 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15664 		goto update_fextnvm6;
   15665 
   15666 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15667 
   15668 	/* Clear link status transmit timeout */
   15669 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15670 	if (speed == STATUS_SPEED_100) {
   15671 		/* Set inband Tx timeout to 5x10us for 100Half */
   15672 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15673 
   15674 		/* Do not extend the K1 entry latency for 100Half */
   15675 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15676 	} else {
   15677 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15678 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15679 
   15680 		/* Extend the K1 entry latency for 10 Mbps */
   15681 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15682 	}
   15683 
   15684 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15685 
   15686 update_fextnvm6:
   15687 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15688 	return 0;
   15689 }
   15690 
   15691 /*
   15692  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15693  *  @sc:   pointer to the HW structure
   15694  *  @link: link up bool flag
   15695  *
   15696  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15697  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15698  *  If link is down, the function will restore the default K1 setting located
   15699  *  in the NVM.
   15700  */
   15701 static int
   15702 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15703 {
   15704 	int k1_enable = sc->sc_nvm_k1_enabled;
   15705 
   15706 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15707 		device_xname(sc->sc_dev), __func__));
   15708 
   15709 	if (sc->phy.acquire(sc) != 0)
   15710 		return -1;
   15711 
   15712 	if (link) {
   15713 		k1_enable = 0;
   15714 
   15715 		/* Link stall fix for link up */
   15716 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15717 		    0x0100);
   15718 	} else {
   15719 		/* Link stall fix for link down */
   15720 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15721 		    0x4100);
   15722 	}
   15723 
   15724 	wm_configure_k1_ich8lan(sc, k1_enable);
   15725 	sc->phy.release(sc);
   15726 
   15727 	return 0;
   15728 }
   15729 
   15730 /*
   15731  *  wm_k1_workaround_lv - K1 Si workaround
   15732  *  @sc:   pointer to the HW structure
   15733  *
   15734  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15735  *  Disable K1 for 1000 and 100 speeds
   15736  */
   15737 static int
   15738 wm_k1_workaround_lv(struct wm_softc *sc)
   15739 {
   15740 	uint32_t reg;
   15741 	uint16_t phyreg;
   15742 	int rv;
   15743 
   15744 	if (sc->sc_type != WM_T_PCH2)
   15745 		return 0;
   15746 
   15747 	/* Set K1 beacon duration based on 10Mbps speed */
   15748 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15749 	if (rv != 0)
   15750 		return rv;
   15751 
   15752 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15753 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15754 		if (phyreg &
   15755 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15756 			/* LV (82579) 1G/100 packet drop issue workaround */
   15757 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15758 			    &phyreg);
   15759 			if (rv != 0)
   15760 				return rv;
   15761 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15762 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15763 			    phyreg);
   15764 			if (rv != 0)
   15765 				return rv;
   15766 		} else {
   15767 			/* For 10Mbps */
   15768 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15769 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15770 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15771 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15772 		}
   15773 	}
   15774 
   15775 	return 0;
   15776 }
   15777 
   15778 /*
   15779  *  wm_link_stall_workaround_hv - Si workaround
   15780  *  @sc: pointer to the HW structure
   15781  *
   15782  *  This function works around a Si bug where the link partner can get
   15783  *  a link up indication before the PHY does. If small packets are sent
   15784  *  by the link partner they can be placed in the packet buffer without
   15785  *  being properly accounted for by the PHY and will stall preventing
   15786  *  further packets from being received.  The workaround is to clear the
   15787  *  packet buffer after the PHY detects link up.
   15788  */
   15789 static int
   15790 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15791 {
   15792 	uint16_t phyreg;
   15793 
   15794 	if (sc->sc_phytype != WMPHY_82578)
   15795 		return 0;
   15796 
    15797 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   15798 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15799 	if ((phyreg & BMCR_LOOP) != 0)
   15800 		return 0;
   15801 
   15802 	/* Check if link is up and at 1Gbps */
   15803 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15804 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15805 	    | BM_CS_STATUS_SPEED_MASK;
   15806 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15807 		| BM_CS_STATUS_SPEED_1000))
   15808 		return 0;
   15809 
   15810 	delay(200 * 1000);	/* XXX too big */
   15811 
   15812 	/* Flush the packets in the fifo buffer */
   15813 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15814 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15815 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15816 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15817 
   15818 	return 0;
   15819 }
   15820 
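          /* Set the MDIO interface of HV (PCH) PHYs to slow mode */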
   15821 static int
   15822 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15823 {
   15824 	int rv;
   15825 	uint16_t reg;
   15826 
   15827 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15828 	if (rv != 0)
   15829 		return rv;
   15830 
   15831 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15832 	    reg | HV_KMRN_MDIO_SLOW);
   15833 }
   15834 
   15835 /*
   15836  *  wm_configure_k1_ich8lan - Configure K1 power state
   15837  *  @sc: pointer to the HW structure
   15838  *  @enable: K1 state to configure
   15839  *
   15840  *  Configure the K1 power state based on the provided parameter.
   15841  *  Assumes semaphore already acquired.
   15842  */
   15843 static void
   15844 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15845 {
   15846 	uint32_t ctrl, ctrl_ext, tmp;
   15847 	uint16_t kmreg;
   15848 	int rv;
   15849 
   15850 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15851 
   15852 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15853 	if (rv != 0)
   15854 		return;
   15855 
   15856 	if (k1_enable)
   15857 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15858 	else
   15859 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15860 
   15861 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15862 	if (rv != 0)
   15863 		return;
   15864 
   15865 	delay(20);
   15866 
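          	/*
          	 * Briefly force the MAC interface speed with speed-bypass set,
          	 * then restore the original settings, so that the new K1
          	 * configuration takes effect.
          	 */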
   15867 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15868 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15869 
   15870 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15871 	tmp |= CTRL_FRCSPD;
   15872 
   15873 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15874 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15875 	CSR_WRITE_FLUSH(sc);
   15876 	delay(20);
   15877 
   15878 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15879 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15880 	CSR_WRITE_FLUSH(sc);
   15881 	delay(20);
   15882 
   15883 	return;
   15884 }
   15885 
   15886 /* special case - for 82575 - need to do manual init ... */
   15887 static void
   15888 wm_reset_init_script_82575(struct wm_softc *sc)
   15889 {
   15890 	/*
    15891 	 * Remark: this is untested code - we have no board without EEPROM.
    15892 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15893 	 */
   15894 
   15895 	/* SerDes configuration via SERDESCTRL */
   15896 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15899 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15900 
   15901 	/* CCM configuration via CCMCTL register */
   15902 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15903 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15904 
   15905 	/* PCIe lanes configuration */
   15906 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15907 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15908 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15909 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15910 
   15911 	/* PCIe PLL Configuration */
   15912 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15913 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15914 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15915 }
   15916 
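          /*
           * Re-derive the MDICNFG destination and shared-MDIO bits from the
           * NVM after a reset (82580 with SGMII only).
           */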
   15917 static void
   15918 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15919 {
   15920 	uint32_t reg;
   15921 	uint16_t nvmword;
   15922 	int rv;
   15923 
   15924 	if (sc->sc_type != WM_T_82580)
   15925 		return;
   15926 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15927 		return;
   15928 
   15929 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15930 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15931 	if (rv != 0) {
   15932 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15933 		    __func__);
   15934 		return;
   15935 	}
   15936 
   15937 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15938 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15939 		reg |= MDICNFG_DEST;
   15940 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15941 		reg |= MDICNFG_COM_MDIO;
   15942 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15943 }
   15944 
   15945 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15946 
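          /*
           * Check whether the PHY responds by reading its ID registers,
           * retrying in MDIO slow mode if needed.  On PCH_LPT and newer,
           * also unforce SMBus mode if the ME is not active.
           */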
   15947 static bool
   15948 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15949 {
   15950 	uint32_t reg;
   15951 	uint16_t id1, id2;
   15952 	int i, rv;
   15953 
   15954 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15955 		device_xname(sc->sc_dev), __func__));
   15956 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15957 
   15958 	id1 = id2 = 0xffff;
   15959 	for (i = 0; i < 2; i++) {
   15960 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15961 		    &id1);
   15962 		if ((rv != 0) || MII_INVALIDID(id1))
   15963 			continue;
   15964 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15965 		    &id2);
   15966 		if ((rv != 0) || MII_INVALIDID(id2))
   15967 			continue;
   15968 		break;
   15969 	}
   15970 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15971 		goto out;
   15972 
   15973 	/*
   15974 	 * In case the PHY needs to be in mdio slow mode,
   15975 	 * set slow mode and try to get the PHY id again.
   15976 	 */
   15977 	rv = 0;
   15978 	if (sc->sc_type < WM_T_PCH_LPT) {
   15979 		sc->phy.release(sc);
   15980 		wm_set_mdio_slow_mode_hv(sc);
   15981 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15982 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15983 		sc->phy.acquire(sc);
   15984 	}
   15985 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15986 		device_printf(sc->sc_dev, "XXX return with false\n");
   15987 		return false;
   15988 	}
   15989 out:
   15990 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15991 		/* Only unforce SMBus if ME is not active */
   15992 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15993 			uint16_t phyreg;
   15994 
   15995 			/* Unforce SMBus mode in PHY */
   15996 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15997 			    CV_SMB_CTRL, &phyreg);
   15998 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15999 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16000 			    CV_SMB_CTRL, phyreg);
   16001 
   16002 			/* Unforce SMBus mode in MAC */
   16003 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16004 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16005 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16006 		}
   16007 	}
   16008 	return true;
   16009 }
   16010 
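          /* Toggle the LANPHYPC pin value to power-cycle the PHY */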
   16011 static void
   16012 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16013 {
   16014 	uint32_t reg;
   16015 	int i;
   16016 
   16017 	/* Set PHY Config Counter to 50msec */
   16018 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16019 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16020 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16021 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16022 
   16023 	/* Toggle LANPHYPC */
   16024 	reg = CSR_READ(sc, WMREG_CTRL);
   16025 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16026 	reg &= ~CTRL_LANPHYPC_VALUE;
   16027 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16028 	CSR_WRITE_FLUSH(sc);
   16029 	delay(1000);
   16030 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16031 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16032 	CSR_WRITE_FLUSH(sc);
   16033 
   16034 	if (sc->sc_type < WM_T_PCH_LPT)
   16035 		delay(50 * 1000);
   16036 	else {
   16037 		i = 20;
   16038 
   16039 		do {
   16040 			delay(5 * 1000);
   16041 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16042 		    && i--);
   16043 
   16044 		delay(30 * 1000);
   16045 	}
   16046 }
   16047 
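          /*
           *  wm_platform_pm_pch_lpt - Set platform power management values
           *  @sc:   pointer to the HW structure
           *  @link: link up bool flag
           *
           *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
           *  GbE MAC in the Lynx Point PCH based on the Rx buffer size and link
           *  speed, and set the OBFF high water mark.
           */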
   16048 static int
   16049 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16050 {
   16051 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16052 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16053 	uint32_t rxa;
   16054 	uint16_t scale = 0, lat_enc = 0;
   16055 	int32_t obff_hwm = 0;
   16056 	int64_t lat_ns, value;
   16057 
   16058 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16059 		device_xname(sc->sc_dev), __func__));
   16060 
   16061 	if (link) {
   16062 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16063 		uint32_t status;
   16064 		uint16_t speed;
   16065 		pcireg_t preg;
   16066 
   16067 		status = CSR_READ(sc, WMREG_STATUS);
   16068 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16069 		case STATUS_SPEED_10:
   16070 			speed = 10;
   16071 			break;
   16072 		case STATUS_SPEED_100:
   16073 			speed = 100;
   16074 			break;
   16075 		case STATUS_SPEED_1000:
   16076 			speed = 1000;
   16077 			break;
   16078 		default:
   16079 			device_printf(sc->sc_dev, "Unknown speed "
   16080 			    "(status = %08x)\n", status);
   16081 			return -1;
   16082 		}
   16083 
   16084 		/* Rx Packet Buffer Allocation size (KB) */
   16085 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16086 
   16087 		/*
   16088 		 * Determine the maximum latency tolerated by the device.
   16089 		 *
   16090 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16091 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16092 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16093 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16094 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16095 		 */
   16096 		lat_ns = ((int64_t)rxa * 1024 -
   16097 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16098 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16099 		if (lat_ns < 0)
   16100 			lat_ns = 0;
   16101 		else
   16102 			lat_ns /= speed;
   16103 		value = lat_ns;
   16104 
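          		/*
          		 * Scale the latency down into the 10-bit value field by
          		 * bumping the scale exponent (2^5 per step) until it
          		 * fits.  For example, lat_ns = 100000 yields value 98
          		 * with scale 2, i.e. 98 * 2^10 ns (roughly 100us).
          		 */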
   16105 		while (value > LTRV_VALUE) {
    16106 			scale++;
   16107 			value = howmany(value, __BIT(5));
   16108 		}
   16109 		if (scale > LTRV_SCALE_MAX) {
   16110 			device_printf(sc->sc_dev,
   16111 			    "Invalid LTR latency scale %d\n", scale);
   16112 			return -1;
   16113 		}
   16114 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16115 
   16116 		/* Determine the maximum latency tolerated by the platform */
   16117 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16118 		    WM_PCI_LTR_CAP_LPT);
   16119 		max_snoop = preg & 0xffff;
   16120 		max_nosnoop = preg >> 16;
   16121 
   16122 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16123 
   16124 		if (lat_enc > max_ltr_enc) {
   16125 			lat_enc = max_ltr_enc;
   16126 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16127 			    * PCI_LTR_SCALETONS(
   16128 				    __SHIFTOUT(lat_enc,
   16129 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16130 		}
   16131 
   16132 		if (lat_ns) {
   16133 			lat_ns *= speed * 1000;
   16134 			lat_ns /= 8;
   16135 			lat_ns /= 1000000000;
   16136 			obff_hwm = (int32_t)(rxa - lat_ns);
   16137 		}
   16138 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16139 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16140 			    "(rxa = %d, lat_ns = %d)\n",
   16141 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16142 			return -1;
   16143 		}
   16144 	}
   16145 	/* Snoop and No-Snoop latencies the same */
   16146 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16147 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16148 
   16149 	/* Set OBFF high water mark */
   16150 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16151 	reg |= obff_hwm;
   16152 	CSR_WRITE(sc, WMREG_SVT, reg);
   16153 
   16154 	/* Enable OBFF */
   16155 	reg = CSR_READ(sc, WMREG_SVCR);
   16156 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16157 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16158 
   16159 	return 0;
   16160 }
   16161 
   16162 /*
   16163  * I210 Errata 25 and I211 Errata 10
   16164  * Slow System Clock.
   16165  */
   16166 static int
   16167 wm_pll_workaround_i210(struct wm_softc *sc)
   16168 {
   16169 	uint32_t mdicnfg, wuc;
   16170 	uint32_t reg;
   16171 	pcireg_t pcireg;
   16172 	uint32_t pmreg;
   16173 	uint16_t nvmword, tmp_nvmword;
   16174 	uint16_t phyval;
   16175 	bool wa_done = false;
   16176 	int i, rv = 0;
   16177 
   16178 	/* Get Power Management cap offset */
   16179 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16180 	    &pmreg, NULL) == 0)
   16181 		return -1;
   16182 
   16183 	/* Save WUC and MDICNFG registers */
   16184 	wuc = CSR_READ(sc, WMREG_WUC);
   16185 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16186 
   16187 	reg = mdicnfg & ~MDICNFG_DEST;
   16188 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16189 
   16190 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16191 		nvmword = INVM_DEFAULT_AL;
   16192 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16193 
   16194 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16195 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16196 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16197 
   16198 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16199 			rv = 0;
   16200 			break; /* OK */
   16201 		} else
   16202 			rv = -1;
   16203 
   16204 		wa_done = true;
   16205 		/* Directly reset the internal PHY */
   16206 		reg = CSR_READ(sc, WMREG_CTRL);
   16207 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16208 
   16209 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16210 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16211 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16212 
   16213 		CSR_WRITE(sc, WMREG_WUC, 0);
   16214 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16215 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16216 
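          		/*
          		 * Cycle through D3hot and back to D0 to trigger an iNVM
          		 * autoload with the workaround value written above.
          		 */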
   16217 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16218 		    pmreg + PCI_PMCSR);
   16219 		pcireg |= PCI_PMCSR_STATE_D3;
   16220 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16221 		    pmreg + PCI_PMCSR, pcireg);
   16222 		delay(1000);
   16223 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16224 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16225 		    pmreg + PCI_PMCSR, pcireg);
   16226 
   16227 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16228 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16229 
   16230 		/* Restore WUC register */
   16231 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16232 	}
   16233 
   16234 	/* Restore MDICNFG setting */
   16235 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16236 	if (wa_done)
   16237 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16238 	return rv;
   16239 }
   16240 
   16241 static void
   16242 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16243 {
   16244 	uint32_t reg;
   16245 
   16246 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16247 		device_xname(sc->sc_dev), __func__));
   16248 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16249 	    || (sc->sc_type == WM_T_PCH_CNP));
   16250 
   16251 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16252 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16253 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16254 
   16255 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16256 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16257 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16258 }
   16259