/*	$NetBSD: if_wm.c,v 1.635 2019/05/14 09:43:55 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.635 2019/05/14 09:43:55 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
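
/*
 * A usage sketch for DPRINTF() (illustration only, not driver code):
 * the second argument carries its own parentheses, because the macro
 * body expands to "printf y":
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */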

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
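
/*
 * A worked example of the ring arithmetic above (illustration only):
 * WM_NTXDESC(txq) is a power of two, so WM_NEXTTX() wraps with a mask
 * instead of a modulo.  With txq_ndesc == 256 (mask 0xff):
 *
 *	WM_NEXTTX(txq, 254) == 255
 *	WM_NEXTTX(txq, 255) == (255 + 1) & 0xff == 0
 *
 * WM_NEXTTXS() wraps the same way over the Tx job (txsoft) array.
 */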

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
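
/*
 * How the token pasting above expands (illustration only):
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the sizeof() string is a literal template, not pasted), and
 * WM_Q_EVCNT_ATTACH() later snprintf()s a name such as "txq00txdw"
 * into that buffer before handing it to evcnt_attach_dynamic(9).
 */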

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};
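
#if 0
/*
 * Usage sketch for the wm_phyop/wm_nvmop accessor vectors above
 * (hypothetical helper, not part of the driver): the acquire/release
 * hooks bracket the chip-specific locked accessors.
 */
static int
wm_phy_read_sketch(struct wm_softc *sc, int phy, int reg, uint16_t *valp)
{
	int rv;

	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;
	rv = sc->phy.readreg_locked(sc->sc_dev, phy, reg, valp);
	sc->phy.release(sc);
	return rv;
}
#endif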

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
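
/*
 * WM_CORE_LOCKED() is intended for assertions such as
 * KASSERT(WM_CORE_LOCKED(sc)); note that it is also true when the
 * driver runs without a core lock (sc_core_lock == NULL).
 */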

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
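
/*
 * How the tail-pointer chaining above behaves (illustration only):
 * rxq_tailp always points at the slot where the next mbuf pointer must
 * be stored, so appending a fragment never walks the chain:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_tailp = &rxq_head, head = NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
 */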

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
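
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the chip by doing a
 * harmless read of the STATUS register.  A sketch of the usual pattern
 * (register/bit names assumed from if_wmreg.h; illustration only):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(sc->phy.reset_delay_us);
 */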

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
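
/*
 * The _LO/_HI macros above split a ring's bus address for the 64-bit
 * base-address register pairs; on a 32-bit bus_addr_t the high half is
 * constant zero.  A sketch of programming a Tx ring base (TDBAH/TDBAL
 * macro names assumed from if_wmreg.h; illustration only):
 *
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 */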

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1324 	  "82567LM-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1327 	  "82567LF-3 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1330 	  "82567V-2 LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1333 	  "82567V-3? LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1336 	  "HANKSVILLE LAN Controller",
   1337 	  WM_T_ICH10,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1339 	  "PCH LAN (82577LM) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1342 	  "PCH LAN (82577LC) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1345 	  "PCH LAN (82578DM) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1348 	  "PCH LAN (82578DC) Controller",
   1349 	  WM_T_PCH,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1351 	  "PCH2 LAN (82579LM) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1354 	  "PCH2 LAN (82579V) Controller",
   1355 	  WM_T_PCH2,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1357 	  "82575EB dual-1000baseT Ethernet",
   1358 	  WM_T_82575,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1360 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1361 	  WM_T_82575,		WMP_F_SERDES },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1363 	  "82575GB quad-1000baseT Ethernet",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1366 	  "82575GB quad-1000baseT Ethernet (PM)",
   1367 	  WM_T_82575,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1369 	  "82576 1000BaseT Ethernet",
   1370 	  WM_T_82576,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1372 	  "82576 1000BaseX Ethernet",
   1373 	  WM_T_82576,		WMP_F_FIBER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1376 	  "82576 gigabit Ethernet (SERDES)",
   1377 	  WM_T_82576,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1380 	  "82576 quad-1000BaseT Ethernet",
   1381 	  WM_T_82576,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1384 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1385 	  WM_T_82576,		WMP_F_COPPER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1388 	  "82576 gigabit Ethernet",
   1389 	  WM_T_82576,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1392 	  "82576 gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1395 	  "82576 quad-gigabit Ethernet (SERDES)",
   1396 	  WM_T_82576,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1399 	  "82580 1000BaseT Ethernet",
   1400 	  WM_T_82580,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1402 	  "82580 1000BaseX Ethernet",
   1403 	  WM_T_82580,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1406 	  "82580 1000BaseT Ethernet (SERDES)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1410 	  "82580 gigabit Ethernet (SGMII)",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1413 	  "82580 dual-1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1417 	  "82580 quad-1000BaseX Ethernet",
   1418 	  WM_T_82580,		WMP_F_FIBER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1421 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1422 	  WM_T_82580,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1425 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1426 	  WM_T_82580,		WMP_F_SERDES },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1429 	  "DH89XXCC 1000BASE-KX Ethernet",
   1430 	  WM_T_82580,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1433 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1434 	  WM_T_82580,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1437 	  "I350 Gigabit Network Connection",
   1438 	  WM_T_I350,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1441 	  "I350 Gigabit Fiber Network Connection",
   1442 	  WM_T_I350,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1445 	  "I350 Gigabit Backplane Connection",
   1446 	  WM_T_I350,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1449 	  "I350 Quad Port Gigabit Ethernet",
   1450 	  WM_T_I350,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1453 	  "I350 Gigabit Connection",
   1454 	  WM_T_I350,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1457 	  "I354 Gigabit Ethernet (KX)",
   1458 	  WM_T_I354,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1461 	  "I354 Gigabit Ethernet (SGMII)",
   1462 	  WM_T_I354,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1465 	  "I354 Gigabit Ethernet (2.5G)",
   1466 	  WM_T_I354,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1469 	  "I210-T1 Ethernet Server Adapter",
   1470 	  WM_T_I210,		WMP_F_COPPER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1473 	  "I210 Ethernet (Copper OEM)",
   1474 	  WM_T_I210,		WMP_F_COPPER },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1477 	  "I210 Ethernet (Copper IT)",
   1478 	  WM_T_I210,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1481 	  "I210 Ethernet (Copper, FLASH less)",
   1482 	  WM_T_I210,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1485 	  "I210 Gigabit Ethernet (Fiber)",
   1486 	  WM_T_I210,		WMP_F_FIBER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1489 	  "I210 Gigabit Ethernet (SERDES)",
   1490 	  WM_T_I210,		WMP_F_SERDES },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1493 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1494 	  WM_T_I210,		WMP_F_SERDES },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1497 	  "I210 Gigabit Ethernet (SGMII)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1501 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1505 	  "I211 Ethernet (COPPER)",
   1506 	  WM_T_I211,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1508 	  "I217 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1511 	  "I217 LM Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1520 	  "I218 V Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1529 	  "I218 LM Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1532 	  "I219 LM Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1535 	  "I219 LM Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1562 	  "I219 V Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1565 	  "I219 V Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1568 	  "I219 V Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1571 	  "I219 V Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1574 	  "I219 V Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1577 	  "I219 V Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1580 	  "I219 V Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ 0,			0,
   1583 	  NULL,
   1584 	  0,			0 },
   1585 };
   1586 
    1587 /*
    1588  * Register read/write functions,
    1589  * other than CSR_{READ|WRITE}().
    1590  */
   1591 
   1592 #if 0 /* Not currently used */
   1593 static inline uint32_t
   1594 wm_io_read(struct wm_softc *sc, int reg)
   1595 {
   1596 
   1597 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1598 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1599 }
   1600 #endif
   1601 
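         /*
          * I/O-mapped register access is indirect: the longword at offset 0
          * of the I/O BAR selects the register and the longword at offset 4
          * carries the data, so each access is a two-step sequence.
          */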
   1602 static inline void
   1603 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1604 {
   1605 
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1607 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1608 }
   1609 
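         /*
          * Program one of the 8-bit sub-registers behind an 82575-style
          * control register: the data byte and the sub-register offset are
          * packed into a single write, and the READY bit is then polled
          * (in 5us steps, up to SCTL_CTL_POLL_TIMEOUT tries) to confirm
          * that the hardware consumed it.
          */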
   1610 static inline void
   1611 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1612     uint32_t data)
   1613 {
   1614 	uint32_t regval;
   1615 	int i;
   1616 
   1617 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1618 
   1619 	CSR_WRITE(sc, reg, regval);
   1620 
   1621 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1622 		delay(5);
   1623 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1624 			break;
   1625 	}
   1626 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1627 		aprint_error("%s: WARNING:"
   1628 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1629 		    device_xname(sc->sc_dev), reg);
   1630 	}
   1631 }
   1632 
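         /*
          * Store a bus address into the two 32-bit halves of a descriptor
          * address field.  The high half is only non-zero when bus_addr_t
          * is 64 bits wide; both halves are stored little-endian.
          */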
   1633 static inline void
   1634 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1635 {
   1636 	wa->wa_low = htole32(v & 0xffffffffU);
   1637 	if (sizeof(bus_addr_t) == 8)
   1638 		wa->wa_high = htole32((uint64_t) v >> 32);
   1639 	else
   1640 		wa->wa_high = 0;
   1641 }
   1642 
   1643 /*
   1644  * Descriptor sync/init functions.
   1645  */
   1646 static inline void
   1647 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1648 {
   1649 	struct wm_softc *sc = txq->txq_sc;
   1650 
   1651 	/* If it will wrap around, sync to the end of the ring. */
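         	/*
         	 * E.g. with a 256-entry ring, start=250 and num=10 sync
         	 * entries 250-255 here and entries 0-3 below.
         	 */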
   1652 	if ((start + num) > WM_NTXDESC(txq)) {
   1653 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1654 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1655 		    (WM_NTXDESC(txq) - start), ops);
   1656 		num -= (WM_NTXDESC(txq) - start);
   1657 		start = 0;
   1658 	}
   1659 
   1660 	/* Now sync whatever is left. */
   1661 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1662 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1663 }
   1664 
   1665 static inline void
   1666 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1667 {
   1668 	struct wm_softc *sc = rxq->rxq_sc;
   1669 
   1670 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1671 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1672 }
   1673 
   1674 static inline void
   1675 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1676 {
   1677 	struct wm_softc *sc = rxq->rxq_sc;
   1678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1679 	struct mbuf *m = rxs->rxs_mbuf;
   1680 
   1681 	/*
   1682 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1683 	 * so that the payload after the Ethernet header is aligned
   1684 	 * to a 4-byte boundary.
    1685 	 *
   1686 	 * XXX BRAINDAMAGE ALERT!
   1687 	 * The stupid chip uses the same size for every buffer, which
   1688 	 * is set in the Receive Control register.  We are using the 2K
   1689 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1690 	 * reason, we can't "scoot" packets longer than the standard
   1691 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1692 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1693 	 * the upper layer copy the headers.
   1694 	 */
   1695 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1696 
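         	/*
         	 * Three receive descriptor layouts are in use: extended
         	 * descriptors on the 82574, "new queue" descriptors on
         	 * 82575 and later, and the legacy wiseman layout elsewhere.
         	 */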
   1697 	if (sc->sc_type == WM_T_82574) {
   1698 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1699 		rxd->erx_data.erxd_addr =
   1700 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1701 		rxd->erx_data.erxd_dd = 0;
   1702 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1703 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1704 
   1705 		rxd->nqrx_data.nrxd_paddr =
   1706 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1707 		/* Currently, split header is not supported. */
   1708 		rxd->nqrx_data.nrxd_haddr = 0;
   1709 	} else {
   1710 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1711 
   1712 		wm_set_dma_addr(&rxd->wrx_addr,
   1713 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1714 		rxd->wrx_len = 0;
   1715 		rxd->wrx_cksum = 0;
   1716 		rxd->wrx_status = 0;
   1717 		rxd->wrx_errors = 0;
   1718 		rxd->wrx_special = 0;
   1719 	}
   1720 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1721 
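         	/* Update the receive descriptor tail so the chip may use this slot. */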
   1722 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1723 }
   1724 
   1725 /*
   1726  * Device driver interface functions and commonly used functions.
   1727  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1728  */
   1729 
   1730 /* Lookup supported device table */
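         /*
          * The product table ends with an all-zero sentinel whose wmp_name
          * is NULL; the scan stops there and NULL means "not supported".
          */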
   1731 static const struct wm_product *
   1732 wm_lookup(const struct pci_attach_args *pa)
   1733 {
   1734 	const struct wm_product *wmp;
   1735 
   1736 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1737 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1738 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1739 			return wmp;
   1740 	}
   1741 	return NULL;
   1742 }
   1743 
   1744 /* The match function (ca_match) */
   1745 static int
   1746 wm_match(device_t parent, cfdata_t cf, void *aux)
   1747 {
   1748 	struct pci_attach_args *pa = aux;
   1749 
   1750 	if (wm_lookup(pa) != NULL)
   1751 		return 1;
   1752 
   1753 	return 0;
   1754 }
   1755 
   1756 /* The attach function (ca_attach) */
   1757 static void
   1758 wm_attach(device_t parent, device_t self, void *aux)
   1759 {
   1760 	struct wm_softc *sc = device_private(self);
   1761 	struct pci_attach_args *pa = aux;
   1762 	prop_dictionary_t dict;
   1763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1764 	pci_chipset_tag_t pc = pa->pa_pc;
   1765 	int counts[PCI_INTR_TYPE_SIZE];
   1766 	pci_intr_type_t max_type;
   1767 	const char *eetype, *xname;
   1768 	bus_space_tag_t memt;
   1769 	bus_space_handle_t memh;
   1770 	bus_size_t memsize;
   1771 	int memh_valid;
   1772 	int i, error;
   1773 	const struct wm_product *wmp;
   1774 	prop_data_t ea;
   1775 	prop_number_t pn;
   1776 	uint8_t enaddr[ETHER_ADDR_LEN];
   1777 	char buf[256];
   1778 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1779 	pcireg_t preg, memtype;
   1780 	uint16_t eeprom_data, apme_mask;
   1781 	bool force_clear_smbi;
   1782 	uint32_t link_mode;
   1783 	uint32_t reg;
   1784 
   1785 	sc->sc_dev = self;
   1786 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1787 	sc->sc_core_stopping = false;
   1788 
   1789 	wmp = wm_lookup(pa);
   1790 #ifdef DIAGNOSTIC
   1791 	if (wmp == NULL) {
   1792 		printf("\n");
   1793 		panic("wm_attach: impossible");
   1794 	}
   1795 #endif
   1796 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1797 
   1798 	sc->sc_pc = pa->pa_pc;
   1799 	sc->sc_pcitag = pa->pa_tag;
   1800 
   1801 	if (pci_dma64_available(pa))
   1802 		sc->sc_dmat = pa->pa_dmat64;
   1803 	else
   1804 		sc->sc_dmat = pa->pa_dmat;
   1805 
   1806 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1807 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1808 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1809 
   1810 	sc->sc_type = wmp->wmp_type;
   1811 
   1812 	/* Set default function pointers */
   1813 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1814 	sc->phy.release = sc->nvm.release = wm_put_null;
   1815 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1816 
   1817 	if (sc->sc_type < WM_T_82543) {
   1818 		if (sc->sc_rev < 2) {
   1819 			aprint_error_dev(sc->sc_dev,
   1820 			    "i82542 must be at least rev. 2\n");
   1821 			return;
   1822 		}
   1823 		if (sc->sc_rev < 3)
   1824 			sc->sc_type = WM_T_82542_2_0;
   1825 	}
   1826 
   1827 	/*
   1828 	 * Disable MSI for Errata:
   1829 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1830 	 *
   1831 	 *  82544: Errata 25
   1832 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1833 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1834 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1835 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1836 	 *
   1837 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1838 	 *
   1839 	 *  82571 & 82572: Errata 63
   1840 	 */
   1841 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1842 	    || (sc->sc_type == WM_T_82572))
   1843 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1844 
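         	/* 82575 and newer devices use the "new queue" descriptor format. */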
   1845 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1846 	    || (sc->sc_type == WM_T_82580)
   1847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1848 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1849 		sc->sc_flags |= WM_F_NEWQUEUE;
   1850 
   1851 	/* Set device properties (mactype) */
   1852 	dict = device_properties(sc->sc_dev);
   1853 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1854 
   1855 	/*
    1856 	 * Map the device.  All devices support memory-mapped access,
   1857 	 * and it is really required for normal operation.
   1858 	 */
   1859 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1860 	switch (memtype) {
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1862 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1863 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1864 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1865 		break;
   1866 	default:
   1867 		memh_valid = 0;
   1868 		break;
   1869 	}
   1870 
   1871 	if (memh_valid) {
   1872 		sc->sc_st = memt;
   1873 		sc->sc_sh = memh;
   1874 		sc->sc_ss = memsize;
   1875 	} else {
   1876 		aprint_error_dev(sc->sc_dev,
   1877 		    "unable to map device registers\n");
   1878 		return;
   1879 	}
   1880 
   1881 	/*
   1882 	 * In addition, i82544 and later support I/O mapped indirect
   1883 	 * register access.  It is not desirable (nor supported in
   1884 	 * this driver) to use it for normal operation, though it is
   1885 	 * required to work around bugs in some chip versions.
   1886 	 */
   1887 	if (sc->sc_type >= WM_T_82544) {
   1888 		/* First we have to find the I/O BAR. */
   1889 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1890 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1891 			if (memtype == PCI_MAPREG_TYPE_IO)
   1892 				break;
   1893 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1894 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1895 				i += 4;	/* skip high bits, too */
   1896 		}
   1897 		if (i < PCI_MAPREG_END) {
   1898 			/*
    1899 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1900 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1901 			 * That's not a problem, because newer chips don't
    1902 			 * have this bug.
    1903 			 *
    1904 			 * The i8254x apparently doesn't respond when the
    1905 			 * I/O BAR is 0, which looks as if it hasn't been
    1906 			 * configured.
   1907 			 */
   1908 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1909 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1910 				aprint_error_dev(sc->sc_dev,
   1911 				    "WARNING: I/O BAR at zero.\n");
   1912 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1913 					0, &sc->sc_iot, &sc->sc_ioh,
   1914 					NULL, &sc->sc_ios) == 0) {
   1915 				sc->sc_flags |= WM_F_IOH_VALID;
   1916 			} else
   1917 				aprint_error_dev(sc->sc_dev,
   1918 				    "WARNING: unable to map I/O space\n");
   1919 		}
   1920 
   1921 	}
   1922 
   1923 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1924 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1925 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1926 	if (sc->sc_type < WM_T_82542_2_1)
   1927 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1928 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1929 
   1930 	/* Power up chip */
   1931 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1932 	    && error != EOPNOTSUPP) {
   1933 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1934 		return;
   1935 	}
   1936 
   1937 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
    1938 	/*
    1939 	 * If we can use only one queue, don't use MSI-X; this saves
    1940 	 * interrupt resources.
    1941 	 */
   1942 	if (sc->sc_nqueues > 1) {
   1943 		max_type = PCI_INTR_TYPE_MSIX;
    1944 		/*
    1945 		 * The 82583 has a MSI-X capability in its PCI configuration
    1946 		 * space but doesn't actually support it; at least the
    1947 		 * documentation says nothing about MSI-X.
    1948 		 */
   1949 		counts[PCI_INTR_TYPE_MSIX]
   1950 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1951 	} else {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
   1956 	/* Allocation settings */
   1957 	counts[PCI_INTR_TYPE_MSI] = 1;
   1958 	counts[PCI_INTR_TYPE_INTX] = 1;
   1959 	/* overridden by disable flags */
   1960 	if (wm_disable_msi != 0) {
   1961 		counts[PCI_INTR_TYPE_MSI] = 0;
   1962 		if (wm_disable_msix != 0) {
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1965 		}
   1966 	} else if (wm_disable_msix != 0) {
   1967 		max_type = PCI_INTR_TYPE_MSI;
   1968 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1969 	}
   1970 
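         	/*
         	 * Interrupt allocation falls back in stages: if MSI-X setup
         	 * fails, the vectors are released and the allocation is
         	 * retried with MSI; if MSI setup fails, it is retried once
         	 * more with a legacy INTx line.
         	 */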
   1971 alloc_retry:
   1972 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1973 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1974 		return;
   1975 	}
   1976 
   1977 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1978 		error = wm_setup_msix(sc);
   1979 		if (error) {
   1980 			pci_intr_release(pc, sc->sc_intrs,
   1981 			    counts[PCI_INTR_TYPE_MSIX]);
   1982 
   1983 			/* Setup for MSI: Disable MSI-X */
   1984 			max_type = PCI_INTR_TYPE_MSI;
   1985 			counts[PCI_INTR_TYPE_MSI] = 1;
   1986 			counts[PCI_INTR_TYPE_INTX] = 1;
   1987 			goto alloc_retry;
   1988 		}
   1989 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1990 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1991 		error = wm_setup_legacy(sc);
   1992 		if (error) {
   1993 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1994 			    counts[PCI_INTR_TYPE_MSI]);
   1995 
   1996 			/* The next try is for INTx: Disable MSI */
   1997 			max_type = PCI_INTR_TYPE_INTX;
   1998 			counts[PCI_INTR_TYPE_INTX] = 1;
   1999 			goto alloc_retry;
   2000 		}
   2001 	} else {
   2002 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2003 		error = wm_setup_legacy(sc);
   2004 		if (error) {
   2005 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2006 			    counts[PCI_INTR_TYPE_INTX]);
   2007 			return;
   2008 		}
   2009 	}
   2010 
   2011 	/*
   2012 	 * Check the function ID (unit number of the chip).
   2013 	 */
   2014 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2015 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2016 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2017 	    || (sc->sc_type == WM_T_82580)
   2018 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2019 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2020 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2021 	else
   2022 		sc->sc_funcid = 0;
   2023 
   2024 	/*
   2025 	 * Determine a few things about the bus we're connected to.
   2026 	 */
   2027 	if (sc->sc_type < WM_T_82543) {
   2028 		/* We don't really know the bus characteristics here. */
   2029 		sc->sc_bus_speed = 33;
   2030 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    2031 		/*
    2032 		 * CSA (Communication Streaming Architecture) is about as
    2033 		 * fast as a 32-bit 66MHz PCI bus.
    2034 		 */
   2035 		sc->sc_flags |= WM_F_CSA;
   2036 		sc->sc_bus_speed = 66;
   2037 		aprint_verbose_dev(sc->sc_dev,
   2038 		    "Communication Streaming Architecture\n");
   2039 		if (sc->sc_type == WM_T_82547) {
   2040 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2041 			callout_setfunc(&sc->sc_txfifo_ch,
   2042 			    wm_82547_txfifo_stall, sc);
   2043 			aprint_verbose_dev(sc->sc_dev,
   2044 			    "using 82547 Tx FIFO stall work-around\n");
   2045 		}
   2046 	} else if (sc->sc_type >= WM_T_82571) {
   2047 		sc->sc_flags |= WM_F_PCIE;
   2048 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2049 		    && (sc->sc_type != WM_T_ICH10)
   2050 		    && (sc->sc_type != WM_T_PCH)
   2051 		    && (sc->sc_type != WM_T_PCH2)
   2052 		    && (sc->sc_type != WM_T_PCH_LPT)
   2053 		    && (sc->sc_type != WM_T_PCH_SPT)
   2054 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2055 			/* ICH* and PCH* have no PCIe capability registers */
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2058 				NULL) == 0)
   2059 				aprint_error_dev(sc->sc_dev,
   2060 				    "unable to find PCIe capability\n");
   2061 		}
   2062 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2063 	} else {
   2064 		reg = CSR_READ(sc, WMREG_STATUS);
   2065 		if (reg & STATUS_BUS64)
   2066 			sc->sc_flags |= WM_F_BUS64;
   2067 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2068 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2069 
   2070 			sc->sc_flags |= WM_F_PCIX;
   2071 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2072 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2073 				aprint_error_dev(sc->sc_dev,
   2074 				    "unable to find PCIX capability\n");
   2075 			else if (sc->sc_type != WM_T_82545_3 &&
   2076 				 sc->sc_type != WM_T_82546_3) {
   2077 				/*
   2078 				 * Work around a problem caused by the BIOS
   2079 				 * setting the max memory read byte count
   2080 				 * incorrectly.
   2081 				 */
   2082 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2083 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2084 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2085 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2086 
   2087 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2088 				    PCIX_CMD_BYTECNT_SHIFT;
   2089 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2090 				    PCIX_STATUS_MAXB_SHIFT;
   2091 				if (bytecnt > maxb) {
   2092 					aprint_verbose_dev(sc->sc_dev,
   2093 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2094 					    512 << bytecnt, 512 << maxb);
   2095 					pcix_cmd = (pcix_cmd &
   2096 					    ~PCIX_CMD_BYTECNT_MASK) |
   2097 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2098 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2099 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2100 					    pcix_cmd);
   2101 				}
   2102 			}
   2103 		}
   2104 		/*
   2105 		 * The quad port adapter is special; it has a PCIX-PCIX
   2106 		 * bridge on the board, and can run the secondary bus at
   2107 		 * a higher speed.
   2108 		 */
   2109 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2110 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2111 								      : 66;
   2112 		} else if (sc->sc_flags & WM_F_PCIX) {
   2113 			switch (reg & STATUS_PCIXSPD_MASK) {
   2114 			case STATUS_PCIXSPD_50_66:
   2115 				sc->sc_bus_speed = 66;
   2116 				break;
   2117 			case STATUS_PCIXSPD_66_100:
   2118 				sc->sc_bus_speed = 100;
   2119 				break;
   2120 			case STATUS_PCIXSPD_100_133:
   2121 				sc->sc_bus_speed = 133;
   2122 				break;
   2123 			default:
   2124 				aprint_error_dev(sc->sc_dev,
   2125 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2126 				    reg & STATUS_PCIXSPD_MASK);
   2127 				sc->sc_bus_speed = 66;
   2128 				break;
   2129 			}
   2130 		} else
   2131 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2132 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2133 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2134 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2135 	}
   2136 
   2137 	/* clear interesting stat counters */
   2138 	CSR_READ(sc, WMREG_COLC);
   2139 	CSR_READ(sc, WMREG_RXERRC);
   2140 
   2141 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2142 	    || (sc->sc_type >= WM_T_ICH8))
   2143 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2144 	if (sc->sc_type >= WM_T_ICH8)
   2145 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2146 
   2147 	/* Set PHY, NVM mutex related stuff */
   2148 	switch (sc->sc_type) {
   2149 	case WM_T_82542_2_0:
   2150 	case WM_T_82542_2_1:
   2151 	case WM_T_82543:
   2152 	case WM_T_82544:
   2153 		/* Microwire */
   2154 		sc->nvm.read = wm_nvm_read_uwire;
   2155 		sc->sc_nvm_wordsize = 64;
   2156 		sc->sc_nvm_addrbits = 6;
   2157 		break;
   2158 	case WM_T_82540:
   2159 	case WM_T_82545:
   2160 	case WM_T_82545_3:
   2161 	case WM_T_82546:
   2162 	case WM_T_82546_3:
   2163 		/* Microwire */
   2164 		sc->nvm.read = wm_nvm_read_uwire;
   2165 		reg = CSR_READ(sc, WMREG_EECD);
   2166 		if (reg & EECD_EE_SIZE) {
   2167 			sc->sc_nvm_wordsize = 256;
   2168 			sc->sc_nvm_addrbits = 8;
   2169 		} else {
   2170 			sc->sc_nvm_wordsize = 64;
   2171 			sc->sc_nvm_addrbits = 6;
   2172 		}
   2173 		sc->sc_flags |= WM_F_LOCK_EECD;
   2174 		sc->nvm.acquire = wm_get_eecd;
   2175 		sc->nvm.release = wm_put_eecd;
   2176 		break;
   2177 	case WM_T_82541:
   2178 	case WM_T_82541_2:
   2179 	case WM_T_82547:
   2180 	case WM_T_82547_2:
   2181 		reg = CSR_READ(sc, WMREG_EECD);
    2182 		/*
    2183 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2184 		 * 8254[17], so set the flags and functions before calling it.
    2185 		 */
   2186 		sc->sc_flags |= WM_F_LOCK_EECD;
   2187 		sc->nvm.acquire = wm_get_eecd;
   2188 		sc->nvm.release = wm_put_eecd;
   2189 		if (reg & EECD_EE_TYPE) {
   2190 			/* SPI */
   2191 			sc->nvm.read = wm_nvm_read_spi;
   2192 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2193 			wm_nvm_set_addrbits_size_eecd(sc);
   2194 		} else {
   2195 			/* Microwire */
   2196 			sc->nvm.read = wm_nvm_read_uwire;
   2197 			if ((reg & EECD_EE_ABITS) != 0) {
   2198 				sc->sc_nvm_wordsize = 256;
   2199 				sc->sc_nvm_addrbits = 8;
   2200 			} else {
   2201 				sc->sc_nvm_wordsize = 64;
   2202 				sc->sc_nvm_addrbits = 6;
   2203 			}
   2204 		}
   2205 		break;
   2206 	case WM_T_82571:
   2207 	case WM_T_82572:
   2208 		/* SPI */
   2209 		sc->nvm.read = wm_nvm_read_eerd;
    2210 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		sc->phy.acquire = wm_get_swsm_semaphore;
   2214 		sc->phy.release = wm_put_swsm_semaphore;
   2215 		sc->nvm.acquire = wm_get_nvm_82571;
   2216 		sc->nvm.release = wm_put_nvm_82571;
   2217 		break;
   2218 	case WM_T_82573:
   2219 	case WM_T_82574:
   2220 	case WM_T_82583:
   2221 		sc->nvm.read = wm_nvm_read_eerd;
    2222 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		if (sc->sc_type == WM_T_82573) {
   2224 			sc->phy.acquire = wm_get_swsm_semaphore;
   2225 			sc->phy.release = wm_put_swsm_semaphore;
   2226 			sc->nvm.acquire = wm_get_nvm_82571;
   2227 			sc->nvm.release = wm_put_nvm_82571;
   2228 		} else {
   2229 			/* Both PHY and NVM use the same semaphore. */
   2230 			sc->phy.acquire = sc->nvm.acquire
   2231 			    = wm_get_swfwhw_semaphore;
   2232 			sc->phy.release = sc->nvm.release
   2233 			    = wm_put_swfwhw_semaphore;
   2234 		}
   2235 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2236 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2237 			sc->sc_nvm_wordsize = 2048;
   2238 		} else {
   2239 			/* SPI */
   2240 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2241 			wm_nvm_set_addrbits_size_eecd(sc);
   2242 		}
   2243 		break;
   2244 	case WM_T_82575:
   2245 	case WM_T_82576:
   2246 	case WM_T_82580:
   2247 	case WM_T_I350:
   2248 	case WM_T_I354:
   2249 	case WM_T_80003:
   2250 		/* SPI */
   2251 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2252 		wm_nvm_set_addrbits_size_eecd(sc);
   2253 		if ((sc->sc_type == WM_T_80003)
   2254 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2255 			sc->nvm.read = wm_nvm_read_eerd;
   2256 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2257 		} else {
   2258 			sc->nvm.read = wm_nvm_read_spi;
   2259 			sc->sc_flags |= WM_F_LOCK_EECD;
   2260 		}
   2261 		sc->phy.acquire = wm_get_phy_82575;
   2262 		sc->phy.release = wm_put_phy_82575;
   2263 		sc->nvm.acquire = wm_get_nvm_80003;
   2264 		sc->nvm.release = wm_put_nvm_80003;
   2265 		break;
   2266 	case WM_T_ICH8:
   2267 	case WM_T_ICH9:
   2268 	case WM_T_ICH10:
   2269 	case WM_T_PCH:
   2270 	case WM_T_PCH2:
   2271 	case WM_T_PCH_LPT:
   2272 		sc->nvm.read = wm_nvm_read_ich8;
   2273 		/* FLASH */
   2274 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2275 		sc->sc_nvm_wordsize = 2048;
   2276 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2277 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2278 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2279 			aprint_error_dev(sc->sc_dev,
   2280 			    "can't map FLASH registers\n");
   2281 			goto out;
   2282 		}
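         		/*
         		 * GFPREG gives the first and last sectors of the flash
         		 * region.  The span is converted below to bytes (via
         		 * ICH_FLASH_SECTOR_SIZE), then to 16-bit words, and
         		 * halved because the region holds two NVM banks.
         		 */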
   2283 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2284 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2285 		    ICH_FLASH_SECTOR_SIZE;
   2286 		sc->sc_ich8_flash_bank_size =
   2287 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2288 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2289 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2290 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2291 		sc->sc_flashreg_offset = 0;
   2292 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2293 		sc->phy.release = wm_put_swflag_ich8lan;
   2294 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2295 		sc->nvm.release = wm_put_nvm_ich8lan;
   2296 		break;
   2297 	case WM_T_PCH_SPT:
   2298 	case WM_T_PCH_CNP:
   2299 		sc->nvm.read = wm_nvm_read_spt;
   2300 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2301 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2302 		sc->sc_flasht = sc->sc_st;
   2303 		sc->sc_flashh = sc->sc_sh;
   2304 		sc->sc_ich8_flash_base = 0;
   2305 		sc->sc_nvm_wordsize =
   2306 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2307 		    * NVM_SIZE_MULTIPLIER;
    2308 		/* That is the size in bytes; we want words */
   2309 		sc->sc_nvm_wordsize /= 2;
   2310 		/* Assume 2 banks */
   2311 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2312 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2313 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2314 		sc->phy.release = wm_put_swflag_ich8lan;
   2315 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2316 		sc->nvm.release = wm_put_nvm_ich8lan;
   2317 		break;
   2318 	case WM_T_I210:
   2319 	case WM_T_I211:
    2320 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2321 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2322 		if (wm_nvm_flash_presence_i210(sc)) {
   2323 			sc->nvm.read = wm_nvm_read_eerd;
   2324 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2325 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2326 			wm_nvm_set_addrbits_size_eecd(sc);
   2327 		} else {
   2328 			sc->nvm.read = wm_nvm_read_invm;
   2329 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2330 			sc->sc_nvm_wordsize = INVM_SIZE;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	default:
   2338 		break;
   2339 	}
   2340 
   2341 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2342 	switch (sc->sc_type) {
   2343 	case WM_T_82571:
   2344 	case WM_T_82572:
   2345 		reg = CSR_READ(sc, WMREG_SWSM2);
   2346 		if ((reg & SWSM2_LOCK) == 0) {
   2347 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2348 			force_clear_smbi = true;
   2349 		} else
   2350 			force_clear_smbi = false;
   2351 		break;
   2352 	case WM_T_82573:
   2353 	case WM_T_82574:
   2354 	case WM_T_82583:
   2355 		force_clear_smbi = true;
   2356 		break;
   2357 	default:
   2358 		force_clear_smbi = false;
   2359 		break;
   2360 	}
   2361 	if (force_clear_smbi) {
   2362 		reg = CSR_READ(sc, WMREG_SWSM);
   2363 		if ((reg & SWSM_SMBI) != 0)
   2364 			aprint_error_dev(sc->sc_dev,
   2365 			    "Please update the Bootagent\n");
   2366 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2367 	}
   2368 
    2369 	/*
    2370 	 * Defer printing the EEPROM type until after verifying the checksum.
    2371 	 * This allows the EEPROM type to be printed correctly in the case
    2372 	 * that no EEPROM is attached.
    2373 	 */
   2374 	/*
   2375 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2376 	 * this for later, so we can fail future reads from the EEPROM.
   2377 	 */
   2378 	if (wm_nvm_validate_checksum(sc)) {
    2379 		/*
    2380 		 * Check again, because some PCIe parts fail the first
    2381 		 * check due to the link being in a sleep state.
    2382 		 */
   2383 		if (wm_nvm_validate_checksum(sc))
   2384 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2385 	}
   2386 
   2387 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2388 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2389 	else {
   2390 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2391 		    sc->sc_nvm_wordsize);
   2392 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2393 			aprint_verbose("iNVM");
   2394 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2395 			aprint_verbose("FLASH(HW)");
   2396 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2397 			aprint_verbose("FLASH");
   2398 		else {
   2399 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2400 				eetype = "SPI";
   2401 			else
   2402 				eetype = "MicroWire";
   2403 			aprint_verbose("(%d address bits) %s EEPROM",
   2404 			    sc->sc_nvm_addrbits, eetype);
   2405 		}
   2406 	}
   2407 	wm_nvm_version(sc);
   2408 	aprint_verbose("\n");
   2409 
    2410 	/*
    2411 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
    2412 	 * result might be incorrect.
    2413 	 */
   2414 	wm_gmii_setup_phytype(sc, 0, 0);
   2415 
   2416 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2417 	switch (sc->sc_type) {
   2418 	case WM_T_ICH8:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH:
   2422 	case WM_T_PCH2:
   2423 	case WM_T_PCH_LPT:
   2424 	case WM_T_PCH_SPT:
   2425 	case WM_T_PCH_CNP:
   2426 		apme_mask = WUC_APME;
   2427 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2428 		if ((eeprom_data & apme_mask) != 0)
   2429 			sc->sc_flags |= WM_F_WOL;
   2430 		break;
   2431 	default:
   2432 		break;
   2433 	}
   2434 
   2435 	/* Reset the chip to a known state. */
   2436 	wm_reset(sc);
   2437 
   2438 	/*
   2439 	 * Check for I21[01] PLL workaround.
   2440 	 *
   2441 	 * Three cases:
   2442 	 * a) Chip is I211.
   2443 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2444 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2445 	 */
   2446 	if (sc->sc_type == WM_T_I211)
   2447 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2448 	if (sc->sc_type == WM_T_I210) {
   2449 		if (!wm_nvm_flash_presence_i210(sc))
   2450 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2451 		else if ((sc->sc_nvm_ver_major < 3)
   2452 		    || ((sc->sc_nvm_ver_major == 3)
   2453 			&& (sc->sc_nvm_ver_minor < 25))) {
   2454 			aprint_verbose_dev(sc->sc_dev,
   2455 			    "ROM image version %d.%d is older than 3.25\n",
   2456 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2457 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2458 		}
   2459 	}
   2460 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2461 		wm_pll_workaround_i210(sc);
   2462 
   2463 	wm_get_wakeup(sc);
   2464 
   2465 	/* Non-AMT based hardware can now take control from firmware */
   2466 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2467 		wm_get_hw_control(sc);
   2468 
    2469 	/*
    2470 	 * Read the Ethernet address from the EEPROM, unless it was
    2471 	 * found first in the device properties.
    2472 	 */
   2473 	ea = prop_dictionary_get(dict, "mac-address");
   2474 	if (ea != NULL) {
   2475 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2476 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2477 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2478 	} else {
   2479 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2480 			aprint_error_dev(sc->sc_dev,
   2481 			    "unable to read Ethernet address\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2487 	    ether_sprintf(enaddr));
   2488 
   2489 	/*
   2490 	 * Read the config info from the EEPROM, and set up various
   2491 	 * bits in the control registers based on their contents.
   2492 	 */
   2493 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2494 	if (pn != NULL) {
   2495 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2496 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2497 	} else {
   2498 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2499 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2500 			goto out;
   2501 		}
   2502 	}
   2503 
   2504 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2505 	if (pn != NULL) {
   2506 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2507 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2508 	} else {
   2509 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2510 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2511 			goto out;
   2512 		}
   2513 	}
   2514 
   2515 	/* check for WM_F_WOL */
   2516 	switch (sc->sc_type) {
   2517 	case WM_T_82542_2_0:
   2518 	case WM_T_82542_2_1:
   2519 	case WM_T_82543:
   2520 		/* dummy? */
   2521 		eeprom_data = 0;
   2522 		apme_mask = NVM_CFG3_APME;
   2523 		break;
   2524 	case WM_T_82544:
   2525 		apme_mask = NVM_CFG2_82544_APM_EN;
   2526 		eeprom_data = cfg2;
   2527 		break;
   2528 	case WM_T_82546:
   2529 	case WM_T_82546_3:
   2530 	case WM_T_82571:
   2531 	case WM_T_82572:
   2532 	case WM_T_82573:
   2533 	case WM_T_82574:
   2534 	case WM_T_82583:
   2535 	case WM_T_80003:
   2536 	case WM_T_82575:
   2537 	case WM_T_82576:
   2538 		apme_mask = NVM_CFG3_APME;
   2539 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2540 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2541 		break;
   2542 	case WM_T_82580:
   2543 	case WM_T_I350:
   2544 	case WM_T_I354:
   2545 	case WM_T_I210:
   2546 	case WM_T_I211:
   2547 		apme_mask = NVM_CFG3_APME;
   2548 		wm_nvm_read(sc,
   2549 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2550 		    1, &eeprom_data);
   2551 		break;
   2552 	case WM_T_ICH8:
   2553 	case WM_T_ICH9:
   2554 	case WM_T_ICH10:
   2555 	case WM_T_PCH:
   2556 	case WM_T_PCH2:
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 	case WM_T_PCH_CNP:
    2560 		/* Already checked before wm_reset() */
   2561 		apme_mask = eeprom_data = 0;
   2562 		break;
   2563 	default: /* XXX 82540 */
   2564 		apme_mask = NVM_CFG3_APME;
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2566 		break;
   2567 	}
    2568 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2569 	if ((eeprom_data & apme_mask) != 0)
   2570 		sc->sc_flags |= WM_F_WOL;
   2571 
    2572 	/*
    2573 	 * We have the EEPROM settings; now apply the special cases
    2574 	 * where the EEPROM may be wrong or the board won't support
    2575 	 * wake-on-LAN on a particular port.
    2576 	 */
   2577 	switch (sc->sc_pcidevid) {
   2578 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2579 		sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2582 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2583 		/* Wake events only supported on port A for dual fiber
   2584 		 * regardless of eeprom setting */
   2585 		if (sc->sc_funcid == 1)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2589 		/* If quad port adapter, disable WoL on all but port A */
   2590 		if (sc->sc_funcid != 0)
   2591 			sc->sc_flags &= ~WM_F_WOL;
   2592 		break;
   2593 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2594 		/* Wake events only supported on port A for dual fiber
   2595 		 * regardless of eeprom setting */
   2596 		if (sc->sc_funcid == 1)
   2597 			sc->sc_flags &= ~WM_F_WOL;
   2598 		break;
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2600 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2601 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2602 		/* If quad port adapter, disable WoL on all but port A */
   2603 		if (sc->sc_funcid != 0)
   2604 			sc->sc_flags &= ~WM_F_WOL;
   2605 		break;
   2606 	}
   2607 
   2608 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2609 		/* Check NVM for autonegotiation */
   2610 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2611 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2612 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2613 		}
   2614 	}
   2615 
    2616 	/*
    2617 	 * XXX Some multi-port cards need special handling to
    2618 	 * disable a particular port.
    2619 	 */
   2620 
   2621 	if (sc->sc_type >= WM_T_82544) {
   2622 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2623 		if (pn != NULL) {
   2624 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2625 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2626 		} else {
   2627 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2628 				aprint_error_dev(sc->sc_dev,
   2629 				    "unable to read SWDPIN\n");
   2630 				goto out;
   2631 			}
   2632 		}
   2633 	}
   2634 
   2635 	if (cfg1 & NVM_CFG1_ILOS)
   2636 		sc->sc_ctrl |= CTRL_ILOS;
   2637 
    2638 	/*
    2639 	 * XXX
    2640 	 * This code isn't correct, because pins 2 and 3 are located
    2641 	 * at different positions on newer chips. Check all the datasheets.
    2642 	 *
    2643 	 * Until this is resolved, only apply it to chips up to the 82580.
    2644 	 */
   2645 	if (sc->sc_type <= WM_T_82580) {
   2646 		if (sc->sc_type >= WM_T_82544) {
   2647 			sc->sc_ctrl |=
   2648 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2649 			    CTRL_SWDPIO_SHIFT;
   2650 			sc->sc_ctrl |=
   2651 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2652 			    CTRL_SWDPINS_SHIFT;
   2653 		} else {
   2654 			sc->sc_ctrl |=
   2655 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2656 			    CTRL_SWDPIO_SHIFT;
   2657 		}
   2658 	}
   2659 
   2660 	/* XXX For other than 82580? */
   2661 	if (sc->sc_type == WM_T_82580) {
   2662 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2663 		if (nvmword & __BIT(13))
   2664 			sc->sc_ctrl |= CTRL_ILOS;
   2665 	}
   2666 
   2667 #if 0
   2668 	if (sc->sc_type >= WM_T_82544) {
   2669 		if (cfg1 & NVM_CFG1_IPS0)
   2670 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2671 		if (cfg1 & NVM_CFG1_IPS1)
   2672 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2673 		sc->sc_ctrl_ext |=
   2674 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2675 		    CTRL_EXT_SWDPIO_SHIFT;
   2676 		sc->sc_ctrl_ext |=
   2677 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2678 		    CTRL_EXT_SWDPINS_SHIFT;
   2679 	} else {
   2680 		sc->sc_ctrl_ext |=
   2681 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2682 		    CTRL_EXT_SWDPIO_SHIFT;
   2683 	}
   2684 #endif
   2685 
   2686 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2687 #if 0
   2688 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2689 #endif
   2690 
   2691 	if (sc->sc_type == WM_T_PCH) {
   2692 		uint16_t val;
   2693 
   2694 		/* Save the NVM K1 bit setting */
   2695 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2696 
   2697 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2698 			sc->sc_nvm_k1_enabled = 1;
   2699 		else
   2700 			sc->sc_nvm_k1_enabled = 0;
   2701 	}
   2702 
   2703 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2704 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2705 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2706 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2707 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2708 	    || sc->sc_type == WM_T_82573
   2709 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2710 		/* Copper only */
    2711 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2712 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2713 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2714 	    || (sc->sc_type == WM_T_I211)) {
   2715 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2716 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2717 		switch (link_mode) {
   2718 		case CTRL_EXT_LINK_MODE_1000KX:
   2719 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2720 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2721 			break;
   2722 		case CTRL_EXT_LINK_MODE_SGMII:
   2723 			if (wm_sgmii_uses_mdio(sc)) {
   2724 				aprint_verbose_dev(sc->sc_dev,
   2725 				    "SGMII(MDIO)\n");
   2726 				sc->sc_flags |= WM_F_SGMII;
   2727 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2728 				break;
   2729 			}
   2730 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2731 			/*FALLTHROUGH*/
   2732 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2733 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2734 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2735 				if (link_mode
   2736 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2737 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 					sc->sc_flags |= WM_F_SGMII;
   2739 				} else {
   2740 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2741 					aprint_verbose_dev(sc->sc_dev,
   2742 					    "SERDES\n");
   2743 				}
   2744 				break;
   2745 			}
   2746 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2747 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2748 
   2749 			/* Change current link mode setting */
   2750 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2751 			switch (sc->sc_mediatype) {
   2752 			case WM_MEDIATYPE_COPPER:
   2753 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2754 				break;
   2755 			case WM_MEDIATYPE_SERDES:
   2756 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2757 				break;
   2758 			default:
   2759 				break;
   2760 			}
   2761 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2762 			break;
   2763 		case CTRL_EXT_LINK_MODE_GMII:
   2764 		default:
   2765 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2767 			break;
   2768 		}
   2769 
    2771 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2772 			reg |= CTRL_EXT_I2C_ENA;
    2773 		else
    2774 			reg &= ~CTRL_EXT_I2C_ENA;
   2775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2776 	} else if (sc->sc_type < WM_T_82543 ||
   2777 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2778 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2779 			aprint_error_dev(sc->sc_dev,
   2780 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2781 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2782 		}
   2783 	} else {
   2784 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2785 			aprint_error_dev(sc->sc_dev,
   2786 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2787 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2788 		}
   2789 	}
   2790 
   2791 	if (sc->sc_type >= WM_T_PCH2)
   2792 		sc->sc_flags |= WM_F_EEE;
   2793 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2794 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2795 		/* XXX: Need special handling for I354. (not yet) */
   2796 		if (sc->sc_type != WM_T_I354)
   2797 			sc->sc_flags |= WM_F_EEE;
   2798 	}
   2799 
   2800 	/* Set device properties (macflags) */
   2801 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2802 
   2803 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2804 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2805 
   2806 	/* Initialize the media structures accordingly. */
   2807 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2808 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2809 	else
   2810 		wm_tbi_mediainit(sc); /* All others */
   2811 
   2812 	ifp = &sc->sc_ethercom.ec_if;
   2813 	xname = device_xname(sc->sc_dev);
   2814 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2815 	ifp->if_softc = sc;
   2816 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2817 #ifdef WM_MPSAFE
   2818 	ifp->if_extflags = IFEF_MPSAFE;
   2819 #endif
   2820 	ifp->if_ioctl = wm_ioctl;
   2821 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2822 		ifp->if_start = wm_nq_start;
   2823 		/*
   2824 		 * When the number of CPUs is one and the controller can use
    2825 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2826 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2827 		 * the other for link status changes.
   2828 		 * In this situation, wm_nq_transmit() is disadvantageous
   2829 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2830 		 */
   2831 		if (wm_is_using_multiqueue(sc))
   2832 			ifp->if_transmit = wm_nq_transmit;
   2833 	} else {
   2834 		ifp->if_start = wm_start;
   2835 		/*
    2836 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2837 		 */
   2838 		if (wm_is_using_multiqueue(sc))
   2839 			ifp->if_transmit = wm_transmit;
   2840 	}
    2841 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2842 	ifp->if_init = wm_init;
   2843 	ifp->if_stop = wm_stop;
   2844 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2845 	IFQ_SET_READY(&ifp->if_snd);
   2846 
   2847 	/* Check for jumbo frame */
   2848 	switch (sc->sc_type) {
   2849 	case WM_T_82573:
   2850 		/* XXX limited to 9234 if ASPM is disabled */
   2851 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2852 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2853 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2854 		break;
   2855 	case WM_T_82571:
   2856 	case WM_T_82572:
   2857 	case WM_T_82574:
   2858 	case WM_T_82583:
   2859 	case WM_T_82575:
   2860 	case WM_T_82576:
   2861 	case WM_T_82580:
   2862 	case WM_T_I350:
   2863 	case WM_T_I354:
   2864 	case WM_T_I210:
   2865 	case WM_T_I211:
   2866 	case WM_T_80003:
   2867 	case WM_T_ICH9:
   2868 	case WM_T_ICH10:
   2869 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2870 	case WM_T_PCH_LPT:
   2871 	case WM_T_PCH_SPT:
   2872 	case WM_T_PCH_CNP:
   2873 		/* XXX limited to 9234 */
   2874 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2875 		break;
   2876 	case WM_T_PCH:
   2877 		/* XXX limited to 4096 */
   2878 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2879 		break;
   2880 	case WM_T_82542_2_0:
   2881 	case WM_T_82542_2_1:
   2882 	case WM_T_ICH8:
   2883 		/* No support for jumbo frame */
   2884 		break;
   2885 	default:
   2886 		/* ETHER_MAX_LEN_JUMBO */
   2887 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2888 		break;
   2889 	}
   2890 
    2891 	/* If we're an i82543 or greater, we can support VLANs. */
   2892 	if (sc->sc_type >= WM_T_82543)
   2893 		sc->sc_ethercom.ec_capabilities |=
   2894 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2895 
   2896 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2897 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2898 
   2899 	/*
    2900 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2901 	 * on i82543 and later.
   2902 	 */
   2903 	if (sc->sc_type >= WM_T_82543) {
   2904 		ifp->if_capabilities |=
   2905 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2906 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2907 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2908 		    IFCAP_CSUM_TCPv6_Tx |
   2909 		    IFCAP_CSUM_UDPv6_Tx;
   2910 	}
   2911 
   2912 	/*
   2913 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2914 	 *
   2915 	 *	82541GI (8086:1076) ... no
   2916 	 *	82572EI (8086:10b9) ... yes
   2917 	 */
   2918 	if (sc->sc_type >= WM_T_82571) {
   2919 		ifp->if_capabilities |=
   2920 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2921 	}
   2922 
   2923 	/*
    2924 	 * If we're an i82544 or greater (except i82547), we can do
   2925 	 * TCP segmentation offload.
   2926 	 */
   2927 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2928 		ifp->if_capabilities |= IFCAP_TSOv4;
   2929 	}
   2930 
   2931 	if (sc->sc_type >= WM_T_82571) {
   2932 		ifp->if_capabilities |= IFCAP_TSOv6;
   2933 	}
   2934 
   2935 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2936 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2937 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2939 
   2940 #ifdef WM_MPSAFE
   2941 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2942 #else
   2943 	sc->sc_core_lock = NULL;
   2944 #endif
   2945 
   2946 	/* Attach the interface. */
   2947 	error = if_initialize(ifp);
   2948 	if (error != 0) {
   2949 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2950 		    error);
   2951 		return; /* Error */
   2952 	}
   2953 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2954 	ether_ifattach(ifp, enaddr);
   2955 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2956 	if_register(ifp);
   2957 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2958 	    RND_FLAG_DEFAULT);
   2959 
   2960 #ifdef WM_EVENT_COUNTERS
   2961 	/* Attach event counters. */
   2962 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2963 	    NULL, xname, "linkintr");
   2964 
   2965 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2966 	    NULL, xname, "tx_xoff");
   2967 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2968 	    NULL, xname, "tx_xon");
   2969 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2970 	    NULL, xname, "rx_xoff");
   2971 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2972 	    NULL, xname, "rx_xon");
   2973 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2974 	    NULL, xname, "rx_macctl");
   2975 #endif /* WM_EVENT_COUNTERS */
   2976 
   2977 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2978 		pmf_class_network_register(self, ifp);
   2979 	else
   2980 		aprint_error_dev(self, "couldn't establish power handler\n");
   2981 
   2982 	sc->sc_flags |= WM_F_ATTACHED;
   2983 out:
   2984 	return;
   2985 }
   2986 
   2987 /* The detach function (ca_detach) */
   2988 static int
   2989 wm_detach(device_t self, int flags __unused)
   2990 {
   2991 	struct wm_softc *sc = device_private(self);
   2992 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2993 	int i;
   2994 
   2995 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2996 		return 0;
   2997 
    2998 	/* Stop the interface; callouts are stopped in wm_stop(). */
   2999 	wm_stop(ifp, 1);
   3000 
   3001 	pmf_device_deregister(self);
   3002 
   3003 #ifdef WM_EVENT_COUNTERS
   3004 	evcnt_detach(&sc->sc_ev_linkintr);
   3005 
   3006 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3007 	evcnt_detach(&sc->sc_ev_tx_xon);
   3008 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3009 	evcnt_detach(&sc->sc_ev_rx_xon);
   3010 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3011 #endif /* WM_EVENT_COUNTERS */
   3012 
   3013 	rnd_detach_source(&sc->rnd_source);
   3014 
   3015 	/* Tell the firmware about the release */
   3016 	WM_CORE_LOCK(sc);
   3017 	wm_release_manageability(sc);
   3018 	wm_release_hw_control(sc);
   3019 	wm_enable_wakeup(sc);
   3020 	WM_CORE_UNLOCK(sc);
   3021 
   3022 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3023 
   3024 	/* Delete all remaining media. */
   3025 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3026 
   3027 	ether_ifdetach(ifp);
   3028 	if_detach(ifp);
   3029 	if_percpuq_destroy(sc->sc_ipq);
   3030 
   3031 	/* Unload RX dmamaps and free mbufs */
   3032 	for (i = 0; i < sc->sc_nqueues; i++) {
   3033 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3034 		mutex_enter(rxq->rxq_lock);
   3035 		wm_rxdrain(rxq);
   3036 		mutex_exit(rxq->rxq_lock);
   3037 	}
   3038 	/* Must unlock here */
   3039 
   3040 	/* Disestablish the interrupt handler */
   3041 	for (i = 0; i < sc->sc_nintrs; i++) {
   3042 		if (sc->sc_ihs[i] != NULL) {
   3043 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3044 			sc->sc_ihs[i] = NULL;
   3045 		}
   3046 	}
   3047 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3048 
   3049 	wm_free_txrx_queues(sc);
   3050 
   3051 	/* Unmap the registers */
   3052 	if (sc->sc_ss) {
   3053 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3054 		sc->sc_ss = 0;
   3055 	}
   3056 	if (sc->sc_ios) {
   3057 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3058 		sc->sc_ios = 0;
   3059 	}
   3060 	if (sc->sc_flashs) {
   3061 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3062 		sc->sc_flashs = 0;
   3063 	}
   3064 
   3065 	if (sc->sc_core_lock)
   3066 		mutex_obj_free(sc->sc_core_lock);
   3067 	if (sc->sc_ich_phymtx)
   3068 		mutex_obj_free(sc->sc_ich_phymtx);
   3069 	if (sc->sc_ich_nvmmtx)
   3070 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3071 
   3072 	return 0;
   3073 }
   3074 
   3075 static bool
   3076 wm_suspend(device_t self, const pmf_qual_t *qual)
   3077 {
   3078 	struct wm_softc *sc = device_private(self);
   3079 
   3080 	wm_release_manageability(sc);
   3081 	wm_release_hw_control(sc);
   3082 	wm_enable_wakeup(sc);
   3083 
   3084 	return true;
   3085 }
   3086 
   3087 static bool
   3088 wm_resume(device_t self, const pmf_qual_t *qual)
   3089 {
   3090 	struct wm_softc *sc = device_private(self);
   3091 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3092 	pcireg_t reg;
   3093 	char buf[256];
   3094 
   3095 	reg = CSR_READ(sc, WMREG_WUS);
   3096 	if (reg != 0) {
   3097 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3098 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3099 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3100 	}
   3101 
   3102 	if (sc->sc_type >= WM_T_PCH2)
   3103 		wm_resume_workarounds_pchlan(sc);
   3104 	if ((ifp->if_flags & IFF_UP) == 0) {
   3105 		wm_reset(sc);
   3106 		/* Non-AMT based hardware can now take control from firmware */
   3107 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3108 			wm_get_hw_control(sc);
   3109 		wm_init_manageability(sc);
   3110 	} else {
   3111 		/*
   3112 		 * We called pmf_class_network_register(), so if_init() is
   3113 		 * automatically called when IFF_UP. wm_reset(),
   3114 		 * wm_get_hw_control() and wm_init_manageability() are called
   3115 		 * via wm_init().
   3116 		 */
   3117 	}
   3118 
   3119 	return true;
   3120 }
   3121 
   3122 /*
   3123  * wm_watchdog:		[ifnet interface function]
   3124  *
   3125  *	Watchdog timer handler.
   3126  */
   3127 static void
   3128 wm_watchdog(struct ifnet *ifp)
   3129 {
   3130 	int qid;
   3131 	struct wm_softc *sc = ifp->if_softc;
    3132 	uint16_t hang_queue = 0; /* Max number of queues used by wm(4) is 16 (82576). */
   3133 
   3134 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3135 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3136 
   3137 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3138 	}
   3139 
    3140 	/* If any of the queues hung up, reset the interface. */
   3141 	if (hang_queue != 0) {
   3142 		(void)wm_init(ifp);
   3143 
   3144 		/*
    3145 		 * There is still some upper layer processing which calls
    3146 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3147 		 */
   3148 		/* Try to get more packets going. */
   3149 		ifp->if_start(ifp);
   3150 	}
   3151 }
   3152 
   3153 
   3154 static void
   3155 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3156 {
   3157 
   3158 	mutex_enter(txq->txq_lock);
   3159 	if (txq->txq_sending &&
   3160 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3161 		wm_watchdog_txq_locked(ifp, txq, hang);
   3162 
   3163 	mutex_exit(txq->txq_lock);
   3164 }
   3165 
   3166 static void
   3167 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3168     uint16_t *hang)
   3169 {
   3170 	struct wm_softc *sc = ifp->if_softc;
   3171 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3172 
   3173 	KASSERT(mutex_owned(txq->txq_lock));
   3174 
   3175 	/*
   3176 	 * Since we're using delayed interrupts, sweep up
   3177 	 * before we report an error.
   3178 	 */
   3179 	wm_txeof(txq, UINT_MAX);
   3180 
   3181 	if (txq->txq_sending)
   3182 		*hang |= __BIT(wmq->wmq_id);
   3183 
   3184 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3185 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3186 		    device_xname(sc->sc_dev));
   3187 	} else {
   3188 #ifdef WM_DEBUG
   3189 		int i, j;
   3190 		struct wm_txsoft *txs;
   3191 #endif
   3192 		log(LOG_ERR,
   3193 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3194 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3195 		    txq->txq_next);
   3196 		ifp->if_oerrors++;
   3197 #ifdef WM_DEBUG
   3198 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3199 		    i = WM_NEXTTXS(txq, i)) {
   3200 			txs = &txq->txq_soft[i];
   3201 			printf("txs %d tx %d -> %d\n",
   3202 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3203 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3204 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3205 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3207 					printf("\t %#08x%08x\n",
   3208 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3209 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3210 				} else {
   3211 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3212 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3213 					    txq->txq_descs[j].wtx_addr.wa_low);
   3214 					printf("\t %#04x%02x%02x%08x\n",
   3215 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3216 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3217 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3218 					    txq->txq_descs[j].wtx_cmdlen);
   3219 				}
   3220 				if (j == txs->txs_lastdesc)
   3221 					break;
   3222 			}
   3223 		}
   3224 #endif
   3225 	}
   3226 }
   3227 
   3228 /*
   3229  * wm_tick:
   3230  *
   3231  *	One second timer, used to check link status, sweep up
   3232  *	completed transmit jobs, etc.
   3233  */
   3234 static void
   3235 wm_tick(void *arg)
   3236 {
   3237 	struct wm_softc *sc = arg;
   3238 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3239 #ifndef WM_MPSAFE
   3240 	int s = splnet();
   3241 #endif
   3242 
   3243 	WM_CORE_LOCK(sc);
   3244 
   3245 	if (sc->sc_core_stopping) {
   3246 		WM_CORE_UNLOCK(sc);
   3247 #ifndef WM_MPSAFE
   3248 		splx(s);
   3249 #endif
   3250 		return;
   3251 	}
   3252 
   3253 	if (sc->sc_type >= WM_T_82542_2_1) {
   3254 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3256 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3257 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3258 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3259 	}
   3260 
   3261 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3262 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3263 	    + CSR_READ(sc, WMREG_CRCERRS)
   3264 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3265 	    + CSR_READ(sc, WMREG_SYMERRC)
   3266 	    + CSR_READ(sc, WMREG_RXERRC)
   3267 	    + CSR_READ(sc, WMREG_SEC)
   3268 	    + CSR_READ(sc, WMREG_CEXTERR)
   3269 	    + CSR_READ(sc, WMREG_RLEC);
   3270 	/*
    3271 	 * WMREG_RNBC is incremented when there are no buffers available in
    3272 	 * host memory. It is not the number of dropped packets, because the
    3273 	 * ethernet controller can still receive packets in that case as long
    3274 	 * as there is space in the PHY's FIFO.
    3275 	 *
    3276 	 * If you want to know the value of WMREG_RNBC, you should use your
    3277 	 * own EVCNT instead of if_iqdrops.
   3278 	 */
   3279 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3280 
   3281 	if (sc->sc_flags & WM_F_HAS_MII)
   3282 		mii_tick(&sc->sc_mii);
   3283 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3284 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3285 		wm_serdes_tick(sc);
   3286 	else
   3287 		wm_tbi_tick(sc);
   3288 
   3289 	WM_CORE_UNLOCK(sc);
   3290 
   3291 	wm_watchdog(ifp);
   3292 
   3293 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3294 }
   3295 
   3296 static int
   3297 wm_ifflags_cb(struct ethercom *ec)
   3298 {
   3299 	struct ifnet *ifp = &ec->ec_if;
   3300 	struct wm_softc *sc = ifp->if_softc;
   3301 	int iffchange, ecchange;
   3302 	bool needreset = false;
   3303 	int rc = 0;
   3304 
   3305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3306 		device_xname(sc->sc_dev), __func__));
   3307 
   3308 	WM_CORE_LOCK(sc);
   3309 
   3310 	/*
   3311 	 * Check for if_flags.
   3312 	 * Main usage is to prevent linkdown when opening bpf.
   3313 	 */
   3314 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3315 	sc->sc_if_flags = ifp->if_flags;
   3316 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3317 		needreset = true;
   3318 		goto ec;
   3319 	}
   3320 
   3321 	/* iff related updates */
   3322 	if ((iffchange & IFF_PROMISC) != 0)
   3323 		wm_set_filter(sc);
   3324 
   3325 	wm_set_vlan(sc);
   3326 
   3327 ec:
   3328 	/* Check for ec_capenable. */
   3329 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3330 	sc->sc_ec_capenable = ec->ec_capenable;
   3331 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3332 		needreset = true;
   3333 		goto out;
   3334 	}
   3335 
   3336 	/* ec related updates */
   3337 	wm_set_eee(sc);
   3338 
   3339 out:
   3340 	if (needreset)
   3341 		rc = ENETRESET;
   3342 	WM_CORE_UNLOCK(sc);
   3343 
   3344 	return rc;
   3345 }
   3346 
   3347 /*
   3348  * wm_ioctl:		[ifnet interface function]
   3349  *
   3350  *	Handle control requests from the operator.
   3351  */
   3352 static int
   3353 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3354 {
   3355 	struct wm_softc *sc = ifp->if_softc;
   3356 	struct ifreq *ifr = (struct ifreq *)data;
   3357 	struct ifaddr *ifa = (struct ifaddr *)data;
   3358 	struct sockaddr_dl *sdl;
   3359 	int s, error;
   3360 
   3361 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3362 		device_xname(sc->sc_dev), __func__));
   3363 
   3364 #ifndef WM_MPSAFE
   3365 	s = splnet();
   3366 #endif
   3367 	switch (cmd) {
   3368 	case SIOCSIFMEDIA:
   3369 		WM_CORE_LOCK(sc);
   3370 		/* Flow control requires full-duplex mode. */
   3371 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3372 		    (ifr->ifr_media & IFM_FDX) == 0)
   3373 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3374 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3375 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3376 				/* We can do both TXPAUSE and RXPAUSE. */
   3377 				ifr->ifr_media |=
   3378 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3379 			}
   3380 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3381 		}
   3382 		WM_CORE_UNLOCK(sc);
   3383 #ifdef WM_MPSAFE
   3384 		s = splnet();
   3385 #endif
   3386 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3387 #ifdef WM_MPSAFE
   3388 		splx(s);
   3389 #endif
   3390 		break;
   3391 	case SIOCINITIFADDR:
   3392 		WM_CORE_LOCK(sc);
   3393 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3394 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3395 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3396 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3397 			/* Unicast address is the first multicast entry */
   3398 			wm_set_filter(sc);
   3399 			error = 0;
   3400 			WM_CORE_UNLOCK(sc);
   3401 			break;
   3402 		}
   3403 		WM_CORE_UNLOCK(sc);
   3404 		/*FALLTHROUGH*/
   3405 	default:
   3406 #ifdef WM_MPSAFE
   3407 		s = splnet();
   3408 #endif
   3409 		/* It may call wm_start, so unlock here */
   3410 		error = ether_ioctl(ifp, cmd, data);
   3411 #ifdef WM_MPSAFE
   3412 		splx(s);
   3413 #endif
   3414 		if (error != ENETRESET)
   3415 			break;
   3416 
   3417 		error = 0;
   3418 
   3419 		if (cmd == SIOCSIFCAP)
   3420 			error = (*ifp->if_init)(ifp);
   3421 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3422 			;
   3423 		else if (ifp->if_flags & IFF_RUNNING) {
   3424 			/*
   3425 			 * Multicast list has changed; set the hardware filter
   3426 			 * accordingly.
   3427 			 */
   3428 			WM_CORE_LOCK(sc);
   3429 			wm_set_filter(sc);
   3430 			WM_CORE_UNLOCK(sc);
   3431 		}
   3432 		break;
   3433 	}
   3434 
   3435 #ifndef WM_MPSAFE
   3436 	splx(s);
   3437 #endif
   3438 	return error;
   3439 }
   3440 
   3441 /* MAC address related */
   3442 
   3443 /*
    3444  * Get the offset of the MAC address and return it.
    3445  * If an error occurred, use offset 0.
   3446  */
   3447 static uint16_t
   3448 wm_check_alt_mac_addr(struct wm_softc *sc)
   3449 {
   3450 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3451 	uint16_t offset = NVM_OFF_MACADDR;
   3452 
   3453 	/* Try to read alternative MAC address pointer */
   3454 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3455 		return 0;
   3456 
    3457 	/* Check whether the pointer is valid. */
   3458 	if ((offset == 0x0000) || (offset == 0xffff))
   3459 		return 0;
   3460 
   3461 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3462 	/*
    3463 	 * Check whether the alternative MAC address is valid or not.
    3464 	 * Some cards have a non-0xffff pointer but don't actually use
    3465 	 * an alternative MAC address.
    3466 	 *
    3467 	 * Check whether the multicast (group) bit is set; a valid
    3468 	 * unicast address must have it clear.
   3468 	 */
   3469 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3470 		if (((myea[0] & 0xff) & 0x01) == 0)
   3471 			return offset; /* Found */
   3472 
   3473 	/* Not found */
   3474 	return 0;
   3475 }
   3476 
   3477 static int
   3478 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3479 {
   3480 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3481 	uint16_t offset = NVM_OFF_MACADDR;
   3482 	int do_invert = 0;
   3483 
   3484 	switch (sc->sc_type) {
   3485 	case WM_T_82580:
   3486 	case WM_T_I350:
   3487 	case WM_T_I354:
   3488 		/* EEPROM Top Level Partitioning */
   3489 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3490 		break;
   3491 	case WM_T_82571:
   3492 	case WM_T_82575:
   3493 	case WM_T_82576:
   3494 	case WM_T_80003:
   3495 	case WM_T_I210:
   3496 	case WM_T_I211:
   3497 		offset = wm_check_alt_mac_addr(sc);
   3498 		if (offset == 0)
   3499 			if ((sc->sc_funcid & 0x01) == 1)
   3500 				do_invert = 1;
   3501 		break;
   3502 	default:
   3503 		if ((sc->sc_funcid & 0x01) == 1)
   3504 			do_invert = 1;
   3505 		break;
   3506 	}
   3507 
   3508 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3509 		goto bad;
   3510 
   3511 	enaddr[0] = myea[0] & 0xff;
   3512 	enaddr[1] = myea[0] >> 8;
   3513 	enaddr[2] = myea[1] & 0xff;
   3514 	enaddr[3] = myea[1] >> 8;
   3515 	enaddr[4] = myea[2] & 0xff;
   3516 	enaddr[5] = myea[2] >> 8;
   3517 
   3518 	/*
   3519 	 * Toggle the LSB of the MAC address on the second port
   3520 	 * of some dual port cards.
   3521 	 */
   3522 	if (do_invert != 0)
   3523 		enaddr[5] ^= 1;
   3524 
   3525 	return 0;
   3526 
   3527  bad:
   3528 	return -1;
   3529 }
   3530 
   3531 /*
   3532  * wm_set_ral:
   3533  *
    3534  *	Set an entry in the receive address list.
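 *
 *	An entry is a register pair: RAL holds the first four MAC bytes
 *	in little-endian order and RAH holds the last two bytes plus the
 *	Address Valid (RAL_AV) bit. Passing a NULL enaddr clears the slot.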
   3535  */
   3536 static void
   3537 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3538 {
   3539 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3540 	uint32_t wlock_mac;
   3541 	int rv;
   3542 
   3543 	if (enaddr != NULL) {
   3544 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3545 		    (enaddr[3] << 24);
   3546 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3547 		ral_hi |= RAL_AV;
   3548 	} else {
   3549 		ral_lo = 0;
   3550 		ral_hi = 0;
   3551 	}
   3552 
   3553 	switch (sc->sc_type) {
   3554 	case WM_T_82542_2_0:
   3555 	case WM_T_82542_2_1:
   3556 	case WM_T_82543:
   3557 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3558 		CSR_WRITE_FLUSH(sc);
   3559 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3560 		CSR_WRITE_FLUSH(sc);
   3561 		break;
   3562 	case WM_T_PCH2:
   3563 	case WM_T_PCH_LPT:
   3564 	case WM_T_PCH_SPT:
   3565 	case WM_T_PCH_CNP:
   3566 		if (idx == 0) {
   3567 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3568 			CSR_WRITE_FLUSH(sc);
   3569 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3570 			CSR_WRITE_FLUSH(sc);
   3571 			return;
   3572 		}
   3573 		if (sc->sc_type != WM_T_PCH2) {
   3574 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3575 			    FWSM_WLOCK_MAC);
   3576 			addrl = WMREG_SHRAL(idx - 1);
   3577 			addrh = WMREG_SHRAH(idx - 1);
   3578 		} else {
   3579 			wlock_mac = 0;
   3580 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3581 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3582 		}
   3583 
   3584 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3585 			rv = wm_get_swflag_ich8lan(sc);
   3586 			if (rv != 0)
   3587 				return;
   3588 			CSR_WRITE(sc, addrl, ral_lo);
   3589 			CSR_WRITE_FLUSH(sc);
   3590 			CSR_WRITE(sc, addrh, ral_hi);
   3591 			CSR_WRITE_FLUSH(sc);
   3592 			wm_put_swflag_ich8lan(sc);
   3593 		}
   3594 
   3595 		break;
   3596 	default:
   3597 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3598 		CSR_WRITE_FLUSH(sc);
   3599 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3600 		CSR_WRITE_FLUSH(sc);
   3601 		break;
   3602 	}
   3603 }
   3604 
   3605 /*
   3606  * wm_mchash:
   3607  *
   3608  *	Compute the hash of the multicast address for the 4096-bit
   3609  *	multicast filter.
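 *
 *	The hash is built from enaddr[4] and enaddr[5]; wm_set_filter()
 *	uses the bits above the low five to select an MTA register and
 *	the low five bits to select a bit within it. ICH/PCH chips use
 *	a 10-bit hash (1024-bit filter) instead of 12 bits.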
   3610  */
   3611 static uint32_t
   3612 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3613 {
   3614 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3615 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3616 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3617 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3618 	uint32_t hash;
   3619 
   3620 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3621 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3622 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3623 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3624 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3625 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3626 		return (hash & 0x3ff);
   3627 	}
   3628 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3629 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3630 
   3631 	return (hash & 0xfff);
   3632 }
   3633 
    3634 /*
    3635  * wm_rar_count:
    3636  *	Return the number of usable receive address list entries.
    3637  */
   3638 static int
   3639 wm_rar_count(struct wm_softc *sc)
   3640 {
   3641 	int size;
   3642 
   3643 	switch (sc->sc_type) {
   3644 	case WM_T_ICH8:
    3645 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3646 		break;
   3647 	case WM_T_ICH9:
   3648 	case WM_T_ICH10:
   3649 	case WM_T_PCH:
   3650 		size = WM_RAL_TABSIZE_ICH8;
   3651 		break;
   3652 	case WM_T_PCH2:
   3653 		size = WM_RAL_TABSIZE_PCH2;
   3654 		break;
   3655 	case WM_T_PCH_LPT:
   3656 	case WM_T_PCH_SPT:
   3657 	case WM_T_PCH_CNP:
   3658 		size = WM_RAL_TABSIZE_PCH_LPT;
   3659 		break;
   3660 	case WM_T_82575:
   3661 	case WM_T_I210:
   3662 	case WM_T_I211:
   3663 		size = WM_RAL_TABSIZE_82575;
   3664 		break;
   3665 	case WM_T_82576:
   3666 	case WM_T_82580:
   3667 		size = WM_RAL_TABSIZE_82576;
   3668 		break;
   3669 	case WM_T_I350:
   3670 	case WM_T_I354:
   3671 		size = WM_RAL_TABSIZE_I350;
   3672 		break;
   3673 	default:
   3674 		size = WM_RAL_TABSIZE;
   3675 	}
   3676 
   3677 	return size;
   3678 }
   3679 
   3680 /*
   3681  * wm_set_filter:
   3682  *
   3683  *	Set up the receive filter.
   3684  */
   3685 static void
   3686 wm_set_filter(struct wm_softc *sc)
   3687 {
   3688 	struct ethercom *ec = &sc->sc_ethercom;
   3689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3690 	struct ether_multi *enm;
   3691 	struct ether_multistep step;
   3692 	bus_addr_t mta_reg;
   3693 	uint32_t hash, reg, bit;
   3694 	int i, size, ralmax;
   3695 
   3696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3697 		device_xname(sc->sc_dev), __func__));
   3698 
   3699 	if (sc->sc_type >= WM_T_82544)
   3700 		mta_reg = WMREG_CORDOVA_MTA;
   3701 	else
   3702 		mta_reg = WMREG_MTA;
   3703 
   3704 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3705 
   3706 	if (ifp->if_flags & IFF_BROADCAST)
   3707 		sc->sc_rctl |= RCTL_BAM;
   3708 	if (ifp->if_flags & IFF_PROMISC) {
   3709 		sc->sc_rctl |= RCTL_UPE;
   3710 		goto allmulti;
   3711 	}
   3712 
   3713 	/*
   3714 	 * Set the station address in the first RAL slot, and
   3715 	 * clear the remaining slots.
   3716 	 */
   3717 	size = wm_rar_count(sc);
   3718 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3719 
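	/*
	 * On PCH_LPT and newer, FWSM_WLOCK_MAC may restrict how many
	 * receive address registers software is allowed to touch, so
	 * honor it when deciding how many slots to clear below.
	 */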
   3720 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3721 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3722 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3723 		switch (i) {
   3724 		case 0:
   3725 			/* We can use all entries */
   3726 			ralmax = size;
   3727 			break;
   3728 		case 1:
   3729 			/* Only RAR[0] */
   3730 			ralmax = 1;
   3731 			break;
   3732 		default:
   3733 			/* Available SHRA + RAR[0] */
   3734 			ralmax = i + 1;
   3735 		}
   3736 	} else
   3737 		ralmax = size;
   3738 	for (i = 1; i < size; i++) {
   3739 		if (i < ralmax)
   3740 			wm_set_ral(sc, NULL, i);
   3741 	}
   3742 
   3743 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3744 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3745 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3746 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3747 		size = WM_ICH8_MC_TABSIZE;
   3748 	else
   3749 		size = WM_MC_TABSIZE;
   3750 	/* Clear out the multicast table. */
   3751 	for (i = 0; i < size; i++) {
   3752 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3753 		CSR_WRITE_FLUSH(sc);
   3754 	}
   3755 
   3756 	ETHER_LOCK(ec);
   3757 	ETHER_FIRST_MULTI(step, ec, enm);
   3758 	while (enm != NULL) {
   3759 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3760 			ETHER_UNLOCK(ec);
   3761 			/*
   3762 			 * We must listen to a range of multicast addresses.
   3763 			 * For now, just accept all multicasts, rather than
   3764 			 * trying to set only those filter bits needed to match
   3765 			 * the range.  (At this time, the only use of address
   3766 			 * ranges is for IP multicast routing, for which the
   3767 			 * range is big enough to require all bits set.)
   3768 			 */
   3769 			goto allmulti;
   3770 		}
   3771 
   3772 		hash = wm_mchash(sc, enm->enm_addrlo);
   3773 
   3774 		reg = (hash >> 5);
   3775 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3776 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3777 		    || (sc->sc_type == WM_T_PCH2)
   3778 		    || (sc->sc_type == WM_T_PCH_LPT)
   3779 		    || (sc->sc_type == WM_T_PCH_SPT)
   3780 		    || (sc->sc_type == WM_T_PCH_CNP))
   3781 			reg &= 0x1f;
   3782 		else
   3783 			reg &= 0x7f;
   3784 		bit = hash & 0x1f;
   3785 
   3786 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3787 		hash |= 1U << bit;
   3788 
   3789 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3790 			/*
    3791 			 * 82544 Errata 9: Certain registers cannot be written
   3792 			 * with particular alignments in PCI-X bus operation
   3793 			 * (FCAH, MTA and VFTA).
   3794 			 */
   3795 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3796 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3797 			CSR_WRITE_FLUSH(sc);
   3798 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3799 			CSR_WRITE_FLUSH(sc);
   3800 		} else {
   3801 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3802 			CSR_WRITE_FLUSH(sc);
   3803 		}
   3804 
   3805 		ETHER_NEXT_MULTI(step, enm);
   3806 	}
   3807 	ETHER_UNLOCK(ec);
   3808 
   3809 	ifp->if_flags &= ~IFF_ALLMULTI;
   3810 	goto setit;
   3811 
   3812  allmulti:
   3813 	ifp->if_flags |= IFF_ALLMULTI;
   3814 	sc->sc_rctl |= RCTL_MPE;
   3815 
   3816  setit:
   3817 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3818 }
   3819 
   3820 /* Reset and init related */
   3821 
   3822 static void
   3823 wm_set_vlan(struct wm_softc *sc)
   3824 {
   3825 
   3826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3827 		device_xname(sc->sc_dev), __func__));
   3828 
   3829 	/* Deal with VLAN enables. */
   3830 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3831 		sc->sc_ctrl |= CTRL_VME;
   3832 	else
   3833 		sc->sc_ctrl &= ~CTRL_VME;
   3834 
   3835 	/* Write the control registers. */
   3836 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3837 }
   3838 
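/*
 * wm_set_pcie_completion_timeout:
 *
 *	Make sure a non-zero PCIe completion timeout is in effect: use
 *	a 10ms GCR value on pre-capability-version-2 devices, otherwise
 *	program 16ms via the Device Control 2 register, and disable
 *	completion timeout resend in either case.
 */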
   3839 static void
   3840 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3841 {
   3842 	uint32_t gcr;
   3843 	pcireg_t ctrl2;
   3844 
   3845 	gcr = CSR_READ(sc, WMREG_GCR);
   3846 
   3847 	/* Only take action if timeout value is defaulted to 0 */
   3848 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3849 		goto out;
   3850 
   3851 	if ((gcr & GCR_CAP_VER2) == 0) {
   3852 		gcr |= GCR_CMPL_TMOUT_10MS;
   3853 		goto out;
   3854 	}
   3855 
   3856 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3857 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3858 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3859 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3860 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3861 
   3862 out:
   3863 	/* Disable completion timeout resend */
   3864 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3865 
   3866 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3867 }
   3868 
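/*
 * wm_get_auto_rd_done:
 *
 *	Wait (up to 10ms) for the EEPROM auto-read to complete, as
 *	indicated by EECD_EE_AUTORD.
 */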
   3869 void
   3870 wm_get_auto_rd_done(struct wm_softc *sc)
   3871 {
   3872 	int i;
   3873 
    3874 	/* Wait for eeprom to reload */
   3875 	switch (sc->sc_type) {
   3876 	case WM_T_82571:
   3877 	case WM_T_82572:
   3878 	case WM_T_82573:
   3879 	case WM_T_82574:
   3880 	case WM_T_82583:
   3881 	case WM_T_82575:
   3882 	case WM_T_82576:
   3883 	case WM_T_82580:
   3884 	case WM_T_I350:
   3885 	case WM_T_I354:
   3886 	case WM_T_I210:
   3887 	case WM_T_I211:
   3888 	case WM_T_80003:
   3889 	case WM_T_ICH8:
   3890 	case WM_T_ICH9:
   3891 		for (i = 0; i < 10; i++) {
   3892 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3893 				break;
   3894 			delay(1000);
   3895 		}
   3896 		if (i == 10) {
   3897 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3898 			    "complete\n", device_xname(sc->sc_dev));
   3899 		}
   3900 		break;
   3901 	default:
   3902 		break;
   3903 	}
   3904 }
   3905 
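/*
 * wm_lan_init_done:
 *
 *	Wait for STATUS_LAN_INIT_DONE to be set after a configuration
 *	reload on ICH10 and newer, then clear it.
 */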
   3906 void
   3907 wm_lan_init_done(struct wm_softc *sc)
   3908 {
   3909 	uint32_t reg = 0;
   3910 	int i;
   3911 
   3912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3913 		device_xname(sc->sc_dev), __func__));
   3914 
   3915 	/* Wait for eeprom to reload */
   3916 	switch (sc->sc_type) {
   3917 	case WM_T_ICH10:
   3918 	case WM_T_PCH:
   3919 	case WM_T_PCH2:
   3920 	case WM_T_PCH_LPT:
   3921 	case WM_T_PCH_SPT:
   3922 	case WM_T_PCH_CNP:
   3923 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3924 			reg = CSR_READ(sc, WMREG_STATUS);
   3925 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3926 				break;
   3927 			delay(100);
   3928 		}
   3929 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3930 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3931 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3932 		}
   3933 		break;
   3934 	default:
   3935 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3936 		    __func__);
   3937 		break;
   3938 	}
   3939 
   3940 	reg &= ~STATUS_LAN_INIT_DONE;
   3941 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3942 }
   3943 
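/*
 * wm_get_cfg_done:
 *
 *	Wait until the hardware has finished loading its configuration
 *	after a reset; the mechanism varies by chip generation.
 */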
   3944 void
   3945 wm_get_cfg_done(struct wm_softc *sc)
   3946 {
   3947 	int mask;
   3948 	uint32_t reg;
   3949 	int i;
   3950 
   3951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3952 		device_xname(sc->sc_dev), __func__));
   3953 
   3954 	/* Wait for eeprom to reload */
   3955 	switch (sc->sc_type) {
   3956 	case WM_T_82542_2_0:
   3957 	case WM_T_82542_2_1:
   3958 		/* null */
   3959 		break;
   3960 	case WM_T_82543:
   3961 	case WM_T_82544:
   3962 	case WM_T_82540:
   3963 	case WM_T_82545:
   3964 	case WM_T_82545_3:
   3965 	case WM_T_82546:
   3966 	case WM_T_82546_3:
   3967 	case WM_T_82541:
   3968 	case WM_T_82541_2:
   3969 	case WM_T_82547:
   3970 	case WM_T_82547_2:
   3971 	case WM_T_82573:
   3972 	case WM_T_82574:
   3973 	case WM_T_82583:
   3974 		/* generic */
   3975 		delay(10*1000);
   3976 		break;
   3977 	case WM_T_80003:
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82575:
   3981 	case WM_T_82576:
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 	case WM_T_I210:
   3986 	case WM_T_I211:
   3987 		if (sc->sc_type == WM_T_82571) {
   3988 			/* Only 82571 shares port 0 */
   3989 			mask = EEMNGCTL_CFGDONE_0;
   3990 		} else
   3991 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3992 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3993 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3994 				break;
   3995 			delay(1000);
   3996 		}
   3997 		if (i >= WM_PHY_CFG_TIMEOUT)
   3998 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3999 				device_xname(sc->sc_dev), __func__));
   4000 		break;
   4001 	case WM_T_ICH8:
   4002 	case WM_T_ICH9:
   4003 	case WM_T_ICH10:
   4004 	case WM_T_PCH:
   4005 	case WM_T_PCH2:
   4006 	case WM_T_PCH_LPT:
   4007 	case WM_T_PCH_SPT:
   4008 	case WM_T_PCH_CNP:
   4009 		delay(10*1000);
   4010 		if (sc->sc_type >= WM_T_ICH10)
   4011 			wm_lan_init_done(sc);
   4012 		else
   4013 			wm_get_auto_rd_done(sc);
   4014 
   4015 		/* Clear PHY Reset Asserted bit */
   4016 		reg = CSR_READ(sc, WMREG_STATUS);
   4017 		if ((reg & STATUS_PHYRA) != 0)
   4018 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4019 		break;
   4020 	default:
   4021 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4022 		    __func__);
   4023 		break;
   4024 	}
   4025 }
   4026 
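/*
 * wm_phy_post_reset:
 *
 *	Perform the post-PHY-reset steps: chip-specific workarounds,
 *	clearing the host wakeup bit, and reconfiguring the LCD from
 *	the NVM extended configuration region and OEM bits.
 */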
   4027 int
   4028 wm_phy_post_reset(struct wm_softc *sc)
   4029 {
   4030 	device_t dev = sc->sc_dev;
   4031 	uint16_t reg;
   4032 	int rv = 0;
   4033 
   4034 	/* This function is only for ICH8 and newer. */
   4035 	if (sc->sc_type < WM_T_ICH8)
   4036 		return 0;
   4037 
   4038 	if (wm_phy_resetisblocked(sc)) {
   4039 		/* XXX */
   4040 		device_printf(dev, "PHY is blocked\n");
   4041 		return -1;
   4042 	}
   4043 
   4044 	/* Allow time for h/w to get to quiescent state after reset */
   4045 	delay(10*1000);
   4046 
   4047 	/* Perform any necessary post-reset workarounds */
   4048 	if (sc->sc_type == WM_T_PCH)
   4049 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4050 	else if (sc->sc_type == WM_T_PCH2)
   4051 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4052 	if (rv != 0)
   4053 		return rv;
   4054 
   4055 	/* Clear the host wakeup bit after lcd reset */
   4056 	if (sc->sc_type >= WM_T_PCH) {
   4057 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4058 		reg &= ~BM_WUC_HOST_WU_BIT;
   4059 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4060 	}
   4061 
   4062 	/* Configure the LCD with the extended configuration region in NVM */
   4063 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4064 		return rv;
   4065 
   4066 	/* Configure the LCD with the OEM bits in NVM */
   4067 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4068 
   4069 	if (sc->sc_type == WM_T_PCH2) {
   4070 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4071 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4072 			delay(10 * 1000);
   4073 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4074 		}
   4075 		/* Set EEE LPI Update Timer to 200usec */
   4076 		rv = sc->phy.acquire(sc);
   4077 		if (rv)
   4078 			return rv;
   4079 		rv = wm_write_emi_reg_locked(dev,
   4080 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4081 		sc->phy.release(sc);
   4082 	}
   4083 
   4084 	return rv;
   4085 }
   4086 
   4087 /* Only for PCH and newer */
   4088 static int
   4089 wm_write_smbus_addr(struct wm_softc *sc)
   4090 {
   4091 	uint32_t strap, freq;
   4092 	uint16_t phy_data;
   4093 	int rv;
   4094 
   4095 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4096 		device_xname(sc->sc_dev), __func__));
   4097 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4098 
   4099 	strap = CSR_READ(sc, WMREG_STRAP);
   4100 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4101 
   4102 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4103 	if (rv != 0)
   4104 		return -1;
   4105 
   4106 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4107 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4108 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4109 
   4110 	if (sc->sc_phytype == WMPHY_I217) {
   4111 		/* Restore SMBus frequency */
    4112 		if (freq--) {
   4113 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4114 			    | HV_SMB_ADDR_FREQ_HIGH);
   4115 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4116 			    HV_SMB_ADDR_FREQ_LOW);
   4117 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4118 			    HV_SMB_ADDR_FREQ_HIGH);
   4119 		} else
   4120 			DPRINTF(WM_DEBUG_INIT,
   4121 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4122 				device_xname(sc->sc_dev), __func__));
   4123 	}
   4124 
   4125 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4126 	    phy_data);
   4127 }
   4128 
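/*
 * wm_init_lcd_from_nvm:
 *
 *	Configure the LCD (internal PHY) from the extended configuration
 *	region in the NVM, when the SW config bit indicates software
 *	should do it.
 */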
   4129 static int
   4130 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4131 {
   4132 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4133 	uint16_t phy_page = 0;
   4134 	int rv = 0;
   4135 
   4136 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4137 		device_xname(sc->sc_dev), __func__));
   4138 
   4139 	switch (sc->sc_type) {
   4140 	case WM_T_ICH8:
   4141 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4142 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4143 			return 0;
   4144 
   4145 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4146 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4147 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4148 			break;
   4149 		}
   4150 		/* FALLTHROUGH */
   4151 	case WM_T_PCH:
   4152 	case WM_T_PCH2:
   4153 	case WM_T_PCH_LPT:
   4154 	case WM_T_PCH_SPT:
   4155 	case WM_T_PCH_CNP:
   4156 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4157 		break;
   4158 	default:
   4159 		return 0;
   4160 	}
   4161 
   4162 	if ((rv = sc->phy.acquire(sc)) != 0)
   4163 		return rv;
   4164 
   4165 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4166 	if ((reg & sw_cfg_mask) == 0)
   4167 		goto release;
   4168 
   4169 	/*
   4170 	 * Make sure HW does not configure LCD from PHY extended configuration
   4171 	 * before SW configuration
   4172 	 */
   4173 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4174 	if ((sc->sc_type < WM_T_PCH2)
   4175 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4176 		goto release;
   4177 
   4178 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4179 		device_xname(sc->sc_dev), __func__));
   4180 	/* word_addr is in DWORD */
   4181 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4182 
   4183 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4184 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4185 	if (cnf_size == 0)
   4186 		goto release;
   4187 
   4188 	if (((sc->sc_type == WM_T_PCH)
   4189 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4190 	    || (sc->sc_type > WM_T_PCH)) {
   4191 		/*
   4192 		 * HW configures the SMBus address and LEDs when the OEM and
   4193 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4194 		 * are cleared, SW will configure them instead.
   4195 		 */
   4196 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4197 			device_xname(sc->sc_dev), __func__));
   4198 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4199 			goto release;
   4200 
   4201 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4202 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4203 		    (uint16_t)reg);
   4204 		if (rv != 0)
   4205 			goto release;
   4206 	}
   4207 
   4208 	/* Configure LCD from extended configuration region. */
   4209 	for (i = 0; i < cnf_size; i++) {
   4210 		uint16_t reg_data, reg_addr;
   4211 
   4212 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4213 			goto release;
   4214 
   4215 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4216 			goto release;
   4217 
   4218 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4219 			phy_page = reg_data;
   4220 
   4221 		reg_addr &= IGPHY_MAXREGADDR;
   4222 		reg_addr |= phy_page;
   4223 
   4224 		KASSERT(sc->phy.writereg_locked != NULL);
   4225 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4226 		    reg_data);
   4227 	}
   4228 
   4229 release:
   4230 	sc->phy.release(sc);
   4231 	return rv;
   4232 }
   4233 
   4234 /*
   4235  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4236  *  @sc:       pointer to the HW structure
   4237  *  @d0_state: boolean if entering d0 or d3 device state
   4238  *
   4239  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4240  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4241  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4242  */
   4243 int
   4244 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4245 {
   4246 	uint32_t mac_reg;
   4247 	uint16_t oem_reg;
   4248 	int rv;
   4249 
   4250 	if (sc->sc_type < WM_T_PCH)
   4251 		return 0;
   4252 
   4253 	rv = sc->phy.acquire(sc);
   4254 	if (rv != 0)
   4255 		return rv;
   4256 
   4257 	if (sc->sc_type == WM_T_PCH) {
   4258 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4259 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4260 			goto release;
   4261 	}
   4262 
   4263 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4264 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4265 		goto release;
   4266 
   4267 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4268 
   4269 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4270 	if (rv != 0)
   4271 		goto release;
   4272 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4273 
   4274 	if (d0_state) {
   4275 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4276 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4277 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4278 			oem_reg |= HV_OEM_BITS_LPLU;
   4279 	} else {
   4280 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4281 		    != 0)
   4282 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4283 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4284 		    != 0)
   4285 			oem_reg |= HV_OEM_BITS_LPLU;
   4286 	}
   4287 
   4288 	/* Set Restart auto-neg to activate the bits */
   4289 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4290 	    && (wm_phy_resetisblocked(sc) == false))
   4291 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4292 
   4293 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4294 
   4295 release:
   4296 	sc->phy.release(sc);
   4297 
   4298 	return rv;
   4299 }
   4300 
   4301 /* Init hardware bits */
   4302 void
   4303 wm_initialize_hardware_bits(struct wm_softc *sc)
   4304 {
   4305 	uint32_t tarc0, tarc1, reg;
   4306 
   4307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4308 		device_xname(sc->sc_dev), __func__));
   4309 
   4310 	/* For 82571 variant, 80003 and ICHs */
   4311 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4312 	    || (sc->sc_type >= WM_T_80003)) {
   4313 
   4314 		/* Transmit Descriptor Control 0 */
   4315 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4316 		reg |= TXDCTL_COUNT_DESC;
   4317 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4318 
   4319 		/* Transmit Descriptor Control 1 */
   4320 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4321 		reg |= TXDCTL_COUNT_DESC;
   4322 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4323 
   4324 		/* TARC0 */
   4325 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4326 		switch (sc->sc_type) {
   4327 		case WM_T_82571:
   4328 		case WM_T_82572:
   4329 		case WM_T_82573:
   4330 		case WM_T_82574:
   4331 		case WM_T_82583:
   4332 		case WM_T_80003:
   4333 			/* Clear bits 30..27 */
   4334 			tarc0 &= ~__BITS(30, 27);
   4335 			break;
   4336 		default:
   4337 			break;
   4338 		}
   4339 
   4340 		switch (sc->sc_type) {
   4341 		case WM_T_82571:
   4342 		case WM_T_82572:
   4343 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4344 
   4345 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4346 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4347 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4348 			/* 8257[12] Errata No.7 */
    4349 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4350 
   4351 			/* TARC1 bit 28 */
   4352 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4353 				tarc1 &= ~__BIT(28);
   4354 			else
   4355 				tarc1 |= __BIT(28);
   4356 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4357 
   4358 			/*
   4359 			 * 8257[12] Errata No.13
    4360 			 * Disable Dynamic Clock Gating.
   4361 			 */
   4362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4363 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4364 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4365 			break;
   4366 		case WM_T_82573:
   4367 		case WM_T_82574:
   4368 		case WM_T_82583:
   4369 			if ((sc->sc_type == WM_T_82574)
   4370 			    || (sc->sc_type == WM_T_82583))
   4371 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4372 
   4373 			/* Extended Device Control */
   4374 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4375 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4376 			reg |= __BIT(22);	/* Set bit 22 */
   4377 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4378 
   4379 			/* Device Control */
   4380 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4381 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4382 
   4383 			/* PCIe Control Register */
   4384 			/*
   4385 			 * 82573 Errata (unknown).
   4386 			 *
   4387 			 * 82574 Errata 25 and 82583 Errata 12
   4388 			 * "Dropped Rx Packets":
    4389 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4390 			 */
   4391 			reg = CSR_READ(sc, WMREG_GCR);
   4392 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4393 			CSR_WRITE(sc, WMREG_GCR, reg);
   4394 
   4395 			if ((sc->sc_type == WM_T_82574)
   4396 			    || (sc->sc_type == WM_T_82583)) {
   4397 				/*
   4398 				 * Document says this bit must be set for
   4399 				 * proper operation.
   4400 				 */
   4401 				reg = CSR_READ(sc, WMREG_GCR);
   4402 				reg |= __BIT(22);
   4403 				CSR_WRITE(sc, WMREG_GCR, reg);
   4404 
   4405 				/*
    4406 				 * Apply a workaround for the hardware
    4407 				 * erratum documented in the errata docs.
    4408 				 * It fixes an issue where error-prone or
    4409 				 * unreliable PCIe completions occur,
    4410 				 * particularly with ASPM enabled. Without
    4411 				 * the fix, the issue can cause Tx timeouts.
   4412 				 */
   4413 				reg = CSR_READ(sc, WMREG_GCR2);
   4414 				reg |= __BIT(0);
   4415 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4416 			}
   4417 			break;
   4418 		case WM_T_80003:
   4419 			/* TARC0 */
   4420 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4421 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4422 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4423 
   4424 			/* TARC1 bit 28 */
   4425 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4426 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4427 				tarc1 &= ~__BIT(28);
   4428 			else
   4429 				tarc1 |= __BIT(28);
   4430 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4431 			break;
   4432 		case WM_T_ICH8:
   4433 		case WM_T_ICH9:
   4434 		case WM_T_ICH10:
   4435 		case WM_T_PCH:
   4436 		case WM_T_PCH2:
   4437 		case WM_T_PCH_LPT:
   4438 		case WM_T_PCH_SPT:
   4439 		case WM_T_PCH_CNP:
   4440 			/* TARC0 */
   4441 			if (sc->sc_type == WM_T_ICH8) {
   4442 				/* Set TARC0 bits 29 and 28 */
   4443 				tarc0 |= __BITS(29, 28);
   4444 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4445 				tarc0 |= __BIT(29);
   4446 				/*
   4447 				 *  Drop bit 28. From Linux.
   4448 				 * See I218/I219 spec update
   4449 				 * "5. Buffer Overrun While the I219 is
   4450 				 * Processing DMA Transactions"
   4451 				 */
   4452 				tarc0 &= ~__BIT(28);
   4453 			}
   4454 			/* Set TARC0 bits 23,24,26,27 */
   4455 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4456 
   4457 			/* CTRL_EXT */
   4458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4459 			reg |= __BIT(22);	/* Set bit 22 */
   4460 			/*
   4461 			 * Enable PHY low-power state when MAC is at D3
   4462 			 * w/o WoL
   4463 			 */
   4464 			if (sc->sc_type >= WM_T_PCH)
   4465 				reg |= CTRL_EXT_PHYPDEN;
   4466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4467 
   4468 			/* TARC1 */
   4469 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4470 			/* bit 28 */
   4471 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4472 				tarc1 &= ~__BIT(28);
   4473 			else
   4474 				tarc1 |= __BIT(28);
   4475 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4476 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4477 
   4478 			/* Device Status */
   4479 			if (sc->sc_type == WM_T_ICH8) {
   4480 				reg = CSR_READ(sc, WMREG_STATUS);
   4481 				reg &= ~__BIT(31);
   4482 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4483 
   4484 			}
   4485 
   4486 			/* IOSFPC */
   4487 			if (sc->sc_type == WM_T_PCH_SPT) {
   4488 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4489 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4490 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4491 			}
   4492 			/*
    4493 			 * To work around a descriptor data corruption issue
    4494 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4495 			 * filtering capability.
   4496 			 */
   4497 			reg = CSR_READ(sc, WMREG_RFCTL);
   4498 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4499 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4500 			break;
   4501 		default:
   4502 			break;
   4503 		}
   4504 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4505 
   4506 		switch (sc->sc_type) {
   4507 		/*
   4508 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4509 		 * Avoid RSS Hash Value bug.
   4510 		 */
   4511 		case WM_T_82571:
   4512 		case WM_T_82572:
   4513 		case WM_T_82573:
   4514 		case WM_T_80003:
   4515 		case WM_T_ICH8:
   4516 			reg = CSR_READ(sc, WMREG_RFCTL);
   4517 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4518 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4519 			break;
   4520 		case WM_T_82574:
    4521 			/* Use the extended Rx descriptor. */
   4522 			reg = CSR_READ(sc, WMREG_RFCTL);
   4523 			reg |= WMREG_RFCTL_EXSTEN;
   4524 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4525 			break;
   4526 		default:
   4527 			break;
   4528 		}
   4529 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4530 		/*
   4531 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4532 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4533 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4534 		 * Correctly by the Device"
   4535 		 *
   4536 		 * I354(C2000) Errata AVR53:
   4537 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4538 		 * Hang"
   4539 		 */
   4540 		reg = CSR_READ(sc, WMREG_RFCTL);
   4541 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4542 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4543 	}
   4544 }
   4545 
   4546 static uint32_t
   4547 wm_rxpbs_adjust_82580(uint32_t val)
   4548 {
   4549 	uint32_t rv = 0;
   4550 
   4551 	if (val < __arraycount(wm_82580_rxpbs_table))
   4552 		rv = wm_82580_rxpbs_table[val];
   4553 
   4554 	return rv;
   4555 }
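         /*
          * Usage sketch: wm_reset() feeds the raw RXPBS register value
          * through this table, e.g.
          *	sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
          * Out-of-range indices fall back to 0 (rv's initial value).
          */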
   4556 
   4557 /*
   4558  * wm_reset_phy:
   4559  *
    4560  *	Generic PHY reset function.
    4561  *	Same as e1000_phy_hw_reset_generic().
   4562  */
   4563 static int
   4564 wm_reset_phy(struct wm_softc *sc)
   4565 {
   4566 	uint32_t reg;
   4567 
   4568 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4569 		device_xname(sc->sc_dev), __func__));
   4570 	if (wm_phy_resetisblocked(sc))
   4571 		return -1;
   4572 
   4573 	sc->phy.acquire(sc);
   4574 
   4575 	reg = CSR_READ(sc, WMREG_CTRL);
   4576 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4577 	CSR_WRITE_FLUSH(sc);
   4578 
   4579 	delay(sc->phy.reset_delay_us);
   4580 
   4581 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4582 	CSR_WRITE_FLUSH(sc);
   4583 
   4584 	delay(150);
   4585 
   4586 	sc->phy.release(sc);
   4587 
   4588 	wm_get_cfg_done(sc);
   4589 	wm_phy_post_reset(sc);
   4590 
   4591 	return 0;
   4592 }
   4593 
   4594 /*
   4595  * Only used by WM_T_PCH_SPT which does not use multiqueue,
    4596  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4597  * so it is enough to check sc->sc_queue[0].
   4598 static void
   4599 wm_flush_desc_rings(struct wm_softc *sc)
   4600 {
   4601 	pcireg_t preg;
   4602 	uint32_t reg;
   4603 	struct wm_txqueue *txq;
   4604 	wiseman_txdesc_t *txd;
   4605 	int nexttx;
   4606 	uint32_t rctl;
   4607 
   4608 	/* First, disable MULR fix in FEXTNVM11 */
   4609 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4610 	reg |= FEXTNVM11_DIS_MULRFIX;
   4611 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4612 
   4613 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4614 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4615 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4616 		return;
   4617 
   4618 	/* TX */
   4619 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4620 	    device_xname(sc->sc_dev), preg, reg);
   4621 	reg = CSR_READ(sc, WMREG_TCTL);
   4622 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4623 
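         	/*
         	 * Queue one dummy 512-byte descriptor (IFCS only) and advance
         	 * TDT so the hardware can complete the flush it requested.
         	 */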
   4624 	txq = &sc->sc_queue[0].wmq_txq;
   4625 	nexttx = txq->txq_next;
   4626 	txd = &txq->txq_descs[nexttx];
   4627 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4628 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4629 	txd->wtx_fields.wtxu_status = 0;
   4630 	txd->wtx_fields.wtxu_options = 0;
   4631 	txd->wtx_fields.wtxu_vlan = 0;
   4632 
   4633 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4634 	    BUS_SPACE_BARRIER_WRITE);
   4635 
   4636 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4637 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4638 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4639 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4640 	delay(250);
   4641 
   4642 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4643 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4644 		return;
   4645 
   4646 	/* RX */
   4647 	printf("%s: Need RX flush (reg = %08x)\n",
   4648 	    device_xname(sc->sc_dev), preg);
   4649 	rctl = CSR_READ(sc, WMREG_RCTL);
   4650 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4651 	CSR_WRITE_FLUSH(sc);
   4652 	delay(150);
   4653 
   4654 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4655 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4656 	reg &= 0xffffc000;
   4657 	/*
   4658 	 * Update thresholds: prefetch threshold to 31, host threshold
   4659 	 * to 1 and make sure the granularity is "descriptors" and not
   4660 	 * "cache lines"
   4661 	 */
   4662 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
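         	/*
         	 * Here 0x1f fills the prefetch threshold field (bits 5:0) and
         	 * 1 << 8 puts 1 in the host threshold field (bits 13:8), both
         	 * zeroed above.
         	 */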
   4663 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4664 
   4665 	/* Momentarily enable the RX ring for the changes to take effect */
   4666 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4667 	CSR_WRITE_FLUSH(sc);
   4668 	delay(150);
   4669 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4670 }
   4671 
   4672 /*
   4673  * wm_reset:
   4674  *
   4675  *	Reset the i82542 chip.
   4676  */
   4677 static void
   4678 wm_reset(struct wm_softc *sc)
   4679 {
   4680 	int phy_reset = 0;
   4681 	int i, error = 0;
   4682 	uint32_t reg;
   4683 	uint16_t kmreg;
   4684 	int rv;
   4685 
   4686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4687 		device_xname(sc->sc_dev), __func__));
   4688 	KASSERT(sc->sc_type != 0);
   4689 
   4690 	/*
   4691 	 * Allocate on-chip memory according to the MTU size.
   4692 	 * The Packet Buffer Allocation register must be written
   4693 	 * before the chip is reset.
   4694 	 */
   4695 	switch (sc->sc_type) {
   4696 	case WM_T_82547:
   4697 	case WM_T_82547_2:
   4698 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4699 		    PBA_22K : PBA_30K;
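         		/*
         		 * The Tx FIFO below gets whatever Rx does not use:
         		 * 40K - 22K = 18K for jumbo MTUs, 40K - 30K = 10K
         		 * otherwise.
         		 */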
   4700 		for (i = 0; i < sc->sc_nqueues; i++) {
   4701 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4702 			txq->txq_fifo_head = 0;
   4703 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4704 			txq->txq_fifo_size =
   4705 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4706 			txq->txq_fifo_stall = 0;
   4707 		}
   4708 		break;
   4709 	case WM_T_82571:
   4710 	case WM_T_82572:
   4711 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4712 	case WM_T_80003:
   4713 		sc->sc_pba = PBA_32K;
   4714 		break;
   4715 	case WM_T_82573:
   4716 		sc->sc_pba = PBA_12K;
   4717 		break;
   4718 	case WM_T_82574:
   4719 	case WM_T_82583:
   4720 		sc->sc_pba = PBA_20K;
   4721 		break;
   4722 	case WM_T_82576:
   4723 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4724 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4725 		break;
   4726 	case WM_T_82580:
   4727 	case WM_T_I350:
   4728 	case WM_T_I354:
   4729 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4730 		break;
   4731 	case WM_T_I210:
   4732 	case WM_T_I211:
   4733 		sc->sc_pba = PBA_34K;
   4734 		break;
   4735 	case WM_T_ICH8:
   4736 		/* Workaround for a bit corruption issue in FIFO memory */
   4737 		sc->sc_pba = PBA_8K;
   4738 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4739 		break;
   4740 	case WM_T_ICH9:
   4741 	case WM_T_ICH10:
   4742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4743 		    PBA_14K : PBA_10K;
   4744 		break;
   4745 	case WM_T_PCH:
   4746 	case WM_T_PCH2:	/* XXX 14K? */
   4747 	case WM_T_PCH_LPT:
   4748 	case WM_T_PCH_SPT:
   4749 	case WM_T_PCH_CNP:
   4750 		sc->sc_pba = PBA_26K;
   4751 		break;
   4752 	default:
   4753 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4754 		    PBA_40K : PBA_48K;
   4755 		break;
   4756 	}
   4757 	/*
   4758 	 * Only old or non-multiqueue devices have the PBA register
   4759 	 * XXX Need special handling for 82575.
   4760 	 */
   4761 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4762 	    || (sc->sc_type == WM_T_82575))
   4763 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4764 
   4765 	/* Prevent the PCI-E bus from sticking */
   4766 	if (sc->sc_flags & WM_F_PCIE) {
   4767 		int timeout = 800;
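         		/* Poll up to 800 * 100us = 80ms for master requests to drain. */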
   4768 
   4769 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4771 
   4772 		while (timeout--) {
   4773 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4774 			    == 0)
   4775 				break;
   4776 			delay(100);
   4777 		}
   4778 		if (timeout == 0)
   4779 			device_printf(sc->sc_dev,
   4780 			    "failed to disable busmastering\n");
   4781 	}
   4782 
   4783 	/* Set the completion timeout for interface */
   4784 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4785 	    || (sc->sc_type == WM_T_82580)
   4786 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4787 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4788 		wm_set_pcie_completion_timeout(sc);
   4789 
   4790 	/* Clear interrupt */
   4791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4792 	if (wm_is_using_msix(sc)) {
   4793 		if (sc->sc_type != WM_T_82574) {
   4794 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4795 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4796 		} else
   4797 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4798 	}
   4799 
   4800 	/* Stop the transmit and receive processes. */
   4801 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4802 	sc->sc_rctl &= ~RCTL_EN;
   4803 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4804 	CSR_WRITE_FLUSH(sc);
   4805 
   4806 	/* XXX set_tbi_sbp_82543() */
   4807 
   4808 	delay(10*1000);
   4809 
   4810 	/* Must acquire the MDIO ownership before MAC reset */
   4811 	switch (sc->sc_type) {
   4812 	case WM_T_82573:
   4813 	case WM_T_82574:
   4814 	case WM_T_82583:
   4815 		error = wm_get_hw_semaphore_82573(sc);
   4816 		break;
   4817 	default:
   4818 		break;
   4819 	}
   4820 
   4821 	/*
   4822 	 * 82541 Errata 29? & 82547 Errata 28?
   4823 	 * See also the description about PHY_RST bit in CTRL register
   4824 	 * in 8254x_GBe_SDM.pdf.
   4825 	 */
   4826 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4827 		CSR_WRITE(sc, WMREG_CTRL,
   4828 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4829 		CSR_WRITE_FLUSH(sc);
   4830 		delay(5000);
   4831 	}
   4832 
   4833 	switch (sc->sc_type) {
   4834 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4835 	case WM_T_82541:
   4836 	case WM_T_82541_2:
   4837 	case WM_T_82547:
   4838 	case WM_T_82547_2:
   4839 		/*
   4840 		 * On some chipsets, a reset through a memory-mapped write
   4841 		 * cycle can cause the chip to reset before completing the
    4842 		 * write cycle. This causes major headaches that can be avoided
   4843 		 * by issuing the reset via indirect register writes through
   4844 		 * I/O space.
   4845 		 *
   4846 		 * So, if we successfully mapped the I/O BAR at attach time,
   4847 		 * use that. Otherwise, try our luck with a memory-mapped
   4848 		 * reset.
   4849 		 */
   4850 		if (sc->sc_flags & WM_F_IOH_VALID)
   4851 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4852 		else
   4853 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4854 		break;
   4855 	case WM_T_82545_3:
   4856 	case WM_T_82546_3:
   4857 		/* Use the shadow control register on these chips. */
   4858 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4859 		break;
   4860 	case WM_T_80003:
   4861 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4862 		sc->phy.acquire(sc);
   4863 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4864 		sc->phy.release(sc);
   4865 		break;
   4866 	case WM_T_ICH8:
   4867 	case WM_T_ICH9:
   4868 	case WM_T_ICH10:
   4869 	case WM_T_PCH:
   4870 	case WM_T_PCH2:
   4871 	case WM_T_PCH_LPT:
   4872 	case WM_T_PCH_SPT:
   4873 	case WM_T_PCH_CNP:
   4874 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4875 		if (wm_phy_resetisblocked(sc) == false) {
   4876 			/*
   4877 			 * Gate automatic PHY configuration by hardware on
   4878 			 * non-managed 82579
   4879 			 */
   4880 			if ((sc->sc_type == WM_T_PCH2)
   4881 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4882 				== 0))
   4883 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4884 
   4885 			reg |= CTRL_PHY_RESET;
   4886 			phy_reset = 1;
   4887 		} else
   4888 			printf("XXX reset is blocked!!!\n");
   4889 		sc->phy.acquire(sc);
   4890 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4891 		/* Don't insert a completion barrier when reset */
   4892 		delay(20*1000);
   4893 		mutex_exit(sc->sc_ich_phymtx);
   4894 		break;
   4895 	case WM_T_82580:
   4896 	case WM_T_I350:
   4897 	case WM_T_I354:
   4898 	case WM_T_I210:
   4899 	case WM_T_I211:
   4900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4901 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4902 			CSR_WRITE_FLUSH(sc);
   4903 		delay(5000);
   4904 		break;
   4905 	case WM_T_82542_2_0:
   4906 	case WM_T_82542_2_1:
   4907 	case WM_T_82543:
   4908 	case WM_T_82540:
   4909 	case WM_T_82545:
   4910 	case WM_T_82546:
   4911 	case WM_T_82571:
   4912 	case WM_T_82572:
   4913 	case WM_T_82573:
   4914 	case WM_T_82574:
   4915 	case WM_T_82575:
   4916 	case WM_T_82576:
   4917 	case WM_T_82583:
   4918 	default:
   4919 		/* Everything else can safely use the documented method. */
   4920 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4921 		break;
   4922 	}
   4923 
   4924 	/* Must release the MDIO ownership after MAC reset */
   4925 	switch (sc->sc_type) {
   4926 	case WM_T_82573:
   4927 	case WM_T_82574:
   4928 	case WM_T_82583:
   4929 		if (error == 0)
   4930 			wm_put_hw_semaphore_82573(sc);
   4931 		break;
   4932 	default:
   4933 		break;
   4934 	}
   4935 
   4936 	/* Set Phy Config Counter to 50msec */
   4937 	if (sc->sc_type == WM_T_PCH2) {
   4938 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4939 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4940 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4941 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4942 	}
   4943 
   4944 	if (phy_reset != 0)
   4945 		wm_get_cfg_done(sc);
   4946 
   4947 	/* Reload EEPROM */
   4948 	switch (sc->sc_type) {
   4949 	case WM_T_82542_2_0:
   4950 	case WM_T_82542_2_1:
   4951 	case WM_T_82543:
   4952 	case WM_T_82544:
   4953 		delay(10);
   4954 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4956 		CSR_WRITE_FLUSH(sc);
   4957 		delay(2000);
   4958 		break;
   4959 	case WM_T_82540:
   4960 	case WM_T_82545:
   4961 	case WM_T_82545_3:
   4962 	case WM_T_82546:
   4963 	case WM_T_82546_3:
   4964 		delay(5*1000);
   4965 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4966 		break;
   4967 	case WM_T_82541:
   4968 	case WM_T_82541_2:
   4969 	case WM_T_82547:
   4970 	case WM_T_82547_2:
   4971 		delay(20000);
   4972 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4973 		break;
   4974 	case WM_T_82571:
   4975 	case WM_T_82572:
   4976 	case WM_T_82573:
   4977 	case WM_T_82574:
   4978 	case WM_T_82583:
   4979 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4980 			delay(10);
   4981 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4982 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4983 			CSR_WRITE_FLUSH(sc);
   4984 		}
   4985 		/* check EECD_EE_AUTORD */
   4986 		wm_get_auto_rd_done(sc);
   4987 		/*
   4988 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4989 		 * is set.
   4990 		 */
   4991 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4992 		    || (sc->sc_type == WM_T_82583))
   4993 			delay(25*1000);
   4994 		break;
   4995 	case WM_T_82575:
   4996 	case WM_T_82576:
   4997 	case WM_T_82580:
   4998 	case WM_T_I350:
   4999 	case WM_T_I354:
   5000 	case WM_T_I210:
   5001 	case WM_T_I211:
   5002 	case WM_T_80003:
   5003 		/* check EECD_EE_AUTORD */
   5004 		wm_get_auto_rd_done(sc);
   5005 		break;
   5006 	case WM_T_ICH8:
   5007 	case WM_T_ICH9:
   5008 	case WM_T_ICH10:
   5009 	case WM_T_PCH:
   5010 	case WM_T_PCH2:
   5011 	case WM_T_PCH_LPT:
   5012 	case WM_T_PCH_SPT:
   5013 	case WM_T_PCH_CNP:
   5014 		break;
   5015 	default:
   5016 		panic("%s: unknown type\n", __func__);
   5017 	}
   5018 
   5019 	/* Check whether EEPROM is present or not */
   5020 	switch (sc->sc_type) {
   5021 	case WM_T_82575:
   5022 	case WM_T_82576:
   5023 	case WM_T_82580:
   5024 	case WM_T_I350:
   5025 	case WM_T_I354:
   5026 	case WM_T_ICH8:
   5027 	case WM_T_ICH9:
   5028 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5029 			/* Not found */
   5030 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5031 			if (sc->sc_type == WM_T_82575)
   5032 				wm_reset_init_script_82575(sc);
   5033 		}
   5034 		break;
   5035 	default:
   5036 		break;
   5037 	}
   5038 
   5039 	if (phy_reset != 0)
   5040 		wm_phy_post_reset(sc);
   5041 
   5042 	if ((sc->sc_type == WM_T_82580)
   5043 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5044 		/* Clear global device reset status bit */
   5045 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5046 	}
   5047 
   5048 	/* Clear any pending interrupt events. */
   5049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5050 	reg = CSR_READ(sc, WMREG_ICR);
   5051 	if (wm_is_using_msix(sc)) {
   5052 		if (sc->sc_type != WM_T_82574) {
   5053 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5054 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5055 		} else
   5056 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5057 	}
   5058 
   5059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5062 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5063 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5064 		reg |= KABGTXD_BGSQLBIAS;
   5065 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5066 	}
   5067 
   5068 	/* Reload sc_ctrl */
   5069 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5070 
   5071 	wm_set_eee(sc);
   5072 
   5073 	/*
   5074 	 * For PCH, this write will make sure that any noise will be detected
   5075 	 * as a CRC error and be dropped rather than show up as a bad packet
    5076 	 * to the DMA engine.
   5077 	 */
   5078 	if (sc->sc_type == WM_T_PCH)
   5079 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5080 
   5081 	if (sc->sc_type >= WM_T_82544)
   5082 		CSR_WRITE(sc, WMREG_WUC, 0);
   5083 
   5084 	if (sc->sc_type < WM_T_82575)
   5085 		wm_disable_aspm(sc); /* Workaround for some chips */
   5086 
   5087 	wm_reset_mdicnfg_82580(sc);
   5088 
   5089 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5090 		wm_pll_workaround_i210(sc);
   5091 
   5092 	if (sc->sc_type == WM_T_80003) {
   5093 		/* Default to TRUE to enable the MDIC W/A */
   5094 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5095 
   5096 		rv = wm_kmrn_readreg(sc,
   5097 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5098 		if (rv == 0) {
   5099 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5100 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5101 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5102 			else
   5103 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5104 		}
   5105 	}
   5106 }
   5107 
   5108 /*
   5109  * wm_add_rxbuf:
   5110  *
    5111  *	Add a receive buffer to the indicated descriptor.
   5112  */
   5113 static int
   5114 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5115 {
   5116 	struct wm_softc *sc = rxq->rxq_sc;
   5117 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5118 	struct mbuf *m;
   5119 	int error;
   5120 
   5121 	KASSERT(mutex_owned(rxq->rxq_lock));
   5122 
   5123 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5124 	if (m == NULL)
   5125 		return ENOBUFS;
   5126 
   5127 	MCLGET(m, M_DONTWAIT);
   5128 	if ((m->m_flags & M_EXT) == 0) {
   5129 		m_freem(m);
   5130 		return ENOBUFS;
   5131 	}
   5132 
   5133 	if (rxs->rxs_mbuf != NULL)
   5134 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5135 
   5136 	rxs->rxs_mbuf = m;
   5137 
   5138 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5139 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5140 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5141 	if (error) {
   5142 		/* XXX XXX XXX */
   5143 		aprint_error_dev(sc->sc_dev,
   5144 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5145 		panic("wm_add_rxbuf");
   5146 	}
   5147 
   5148 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5149 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5150 
   5151 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5152 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5153 			wm_init_rxdesc(rxq, idx);
   5154 	} else
   5155 		wm_init_rxdesc(rxq, idx);
   5156 
   5157 	return 0;
   5158 }
   5159 
   5160 /*
   5161  * wm_rxdrain:
   5162  *
   5163  *	Drain the receive queue.
   5164  */
   5165 static void
   5166 wm_rxdrain(struct wm_rxqueue *rxq)
   5167 {
   5168 	struct wm_softc *sc = rxq->rxq_sc;
   5169 	struct wm_rxsoft *rxs;
   5170 	int i;
   5171 
   5172 	KASSERT(mutex_owned(rxq->rxq_lock));
   5173 
   5174 	for (i = 0; i < WM_NRXDESC; i++) {
   5175 		rxs = &rxq->rxq_soft[i];
   5176 		if (rxs->rxs_mbuf != NULL) {
   5177 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5178 			m_freem(rxs->rxs_mbuf);
   5179 			rxs->rxs_mbuf = NULL;
   5180 		}
   5181 	}
   5182 }
   5183 
   5184 /*
   5185  * Setup registers for RSS.
   5186  *
   5187  * XXX not yet VMDq support
   5188  */
   5189 static void
   5190 wm_init_rss(struct wm_softc *sc)
   5191 {
   5192 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5193 	int i;
   5194 
   5195 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5196 
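         	/*
         	 * Fill the redirection table round-robin: entry i steers its
         	 * hash bucket to queue (i % sc_nqueues).
         	 */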
   5197 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5198 		int qid, reta_ent;
   5199 
   5200 		qid  = i % sc->sc_nqueues;
   5201 		switch (sc->sc_type) {
   5202 		case WM_T_82574:
   5203 			reta_ent = __SHIFTIN(qid,
   5204 			    RETA_ENT_QINDEX_MASK_82574);
   5205 			break;
   5206 		case WM_T_82575:
   5207 			reta_ent = __SHIFTIN(qid,
   5208 			    RETA_ENT_QINDEX1_MASK_82575);
   5209 			break;
   5210 		default:
   5211 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5212 			break;
   5213 		}
   5214 
   5215 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5216 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5217 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5218 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5219 	}
   5220 
   5221 	rss_getkey((uint8_t *)rss_key);
   5222 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5223 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5224 
   5225 	if (sc->sc_type == WM_T_82574)
   5226 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5227 	else
   5228 		mrqc = MRQC_ENABLE_RSS_MQ;
   5229 
   5230 	/*
   5231 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5232 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5233 	 */
   5234 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5235 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5236 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5237 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5238 
   5239 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5240 }
   5241 
   5242 /*
    5243  * Adjust the TX and RX queue numbers which the system actually uses.
    5244  *
    5245  * The numbers are affected by the parameters below:
    5246  *     - The number of hardware queues
   5247  *     - The number of MSI-X vectors (= "nvectors" argument)
   5248  *     - ncpu
   5249  */
   5250 static void
   5251 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5252 {
   5253 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5254 
   5255 	if (nvectors < 2) {
   5256 		sc->sc_nqueues = 1;
   5257 		return;
   5258 	}
   5259 
   5260 	switch (sc->sc_type) {
   5261 	case WM_T_82572:
   5262 		hw_ntxqueues = 2;
   5263 		hw_nrxqueues = 2;
   5264 		break;
   5265 	case WM_T_82574:
   5266 		hw_ntxqueues = 2;
   5267 		hw_nrxqueues = 2;
   5268 		break;
   5269 	case WM_T_82575:
   5270 		hw_ntxqueues = 4;
   5271 		hw_nrxqueues = 4;
   5272 		break;
   5273 	case WM_T_82576:
   5274 		hw_ntxqueues = 16;
   5275 		hw_nrxqueues = 16;
   5276 		break;
   5277 	case WM_T_82580:
   5278 	case WM_T_I350:
   5279 	case WM_T_I354:
   5280 		hw_ntxqueues = 8;
   5281 		hw_nrxqueues = 8;
   5282 		break;
   5283 	case WM_T_I210:
   5284 		hw_ntxqueues = 4;
   5285 		hw_nrxqueues = 4;
   5286 		break;
   5287 	case WM_T_I211:
   5288 		hw_ntxqueues = 2;
   5289 		hw_nrxqueues = 2;
   5290 		break;
   5291 		/*
    5292 		 * As the Ethernet controllers below do not support MSI-X,
    5293 		 * this driver does not use multiqueue on them:
   5294 		 *     - WM_T_80003
   5295 		 *     - WM_T_ICH8
   5296 		 *     - WM_T_ICH9
   5297 		 *     - WM_T_ICH10
   5298 		 *     - WM_T_PCH
   5299 		 *     - WM_T_PCH2
   5300 		 *     - WM_T_PCH_LPT
   5301 		 */
   5302 	default:
   5303 		hw_ntxqueues = 1;
   5304 		hw_nrxqueues = 1;
   5305 		break;
   5306 	}
   5307 
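         	/*
         	 * Worked example: an 82576 (16 Tx/16 Rx hardware queues) with
         	 * nvectors = 5 on an 8-CPU machine yields sc_nqueues = 4
         	 * (nvectors - 1); the MSI-X vectors, not the hardware queues
         	 * or CPUs, are the limit in that case.
         	 */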
   5308 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5309 
   5310 	/*
    5311 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5312 	 * limit the number of queues actually used.
   5313 	 */
   5314 	if (nvectors < hw_nqueues + 1)
   5315 		sc->sc_nqueues = nvectors - 1;
   5316 	else
   5317 		sc->sc_nqueues = hw_nqueues;
   5318 
   5319 	/*
    5320 	 * Since more queues than CPUs cannot improve scaling, we limit
    5321 	 * the number of queues actually used.
   5322 	 */
   5323 	if (ncpu < sc->sc_nqueues)
   5324 		sc->sc_nqueues = ncpu;
   5325 }
   5326 
   5327 static inline bool
   5328 wm_is_using_msix(struct wm_softc *sc)
   5329 {
   5330 
   5331 	return (sc->sc_nintrs > 1);
   5332 }
   5333 
   5334 static inline bool
   5335 wm_is_using_multiqueue(struct wm_softc *sc)
   5336 {
   5337 
   5338 	return (sc->sc_nqueues > 1);
   5339 }
   5340 
   5341 static int
   5342 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5343 {
   5344 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5345 	wmq->wmq_id = qidx;
   5346 	wmq->wmq_intr_idx = intr_idx;
   5347 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5348 #ifdef WM_MPSAFE
   5349 	    | SOFTINT_MPSAFE
   5350 #endif
   5351 	    , wm_handle_queue, wmq);
   5352 	if (wmq->wmq_si != NULL)
   5353 		return 0;
   5354 
   5355 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5356 	    wmq->wmq_id);
   5357 
   5358 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5359 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5360 	return ENOMEM;
   5361 }
   5362 
   5363 /*
   5364  * Both single interrupt MSI and INTx can use this function.
   5365  */
   5366 static int
   5367 wm_setup_legacy(struct wm_softc *sc)
   5368 {
   5369 	pci_chipset_tag_t pc = sc->sc_pc;
   5370 	const char *intrstr = NULL;
   5371 	char intrbuf[PCI_INTRSTR_LEN];
   5372 	int error;
   5373 
   5374 	error = wm_alloc_txrx_queues(sc);
   5375 	if (error) {
   5376 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5377 		    error);
   5378 		return ENOMEM;
   5379 	}
   5380 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5381 	    sizeof(intrbuf));
   5382 #ifdef WM_MPSAFE
   5383 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5384 #endif
   5385 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5386 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5387 	if (sc->sc_ihs[0] == NULL) {
   5388 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5389 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5390 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5391 		return ENOMEM;
   5392 	}
   5393 
   5394 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5395 	sc->sc_nintrs = 1;
   5396 
   5397 	return wm_softint_establish(sc, 0, 0);
   5398 }
   5399 
   5400 static int
   5401 wm_setup_msix(struct wm_softc *sc)
   5402 {
   5403 	void *vih;
   5404 	kcpuset_t *affinity;
   5405 	int qidx, error, intr_idx, txrx_established;
   5406 	pci_chipset_tag_t pc = sc->sc_pc;
   5407 	const char *intrstr = NULL;
   5408 	char intrbuf[PCI_INTRSTR_LEN];
   5409 	char intr_xname[INTRDEVNAMEBUF];
   5410 
   5411 	if (sc->sc_nqueues < ncpu) {
   5412 		/*
   5413 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5414 		 * interrupts starts at CPU#1.
   5415 		 */
   5416 		sc->sc_affinity_offset = 1;
   5417 	} else {
   5418 		/*
    5419 		 * In this case, this device uses all CPUs, so we make the
    5420 		 * affinity cpu_index match the MSI-X vector number for readability.
   5421 		 */
   5422 		sc->sc_affinity_offset = 0;
   5423 	}
   5424 
   5425 	error = wm_alloc_txrx_queues(sc);
   5426 	if (error) {
   5427 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5428 		    error);
   5429 		return ENOMEM;
   5430 	}
   5431 
   5432 	kcpuset_create(&affinity, false);
   5433 	intr_idx = 0;
   5434 
   5435 	/*
   5436 	 * TX and RX
   5437 	 */
   5438 	txrx_established = 0;
   5439 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5440 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5441 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5442 
   5443 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5444 		    sizeof(intrbuf));
   5445 #ifdef WM_MPSAFE
   5446 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5447 		    PCI_INTR_MPSAFE, true);
   5448 #endif
   5449 		memset(intr_xname, 0, sizeof(intr_xname));
   5450 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5451 		    device_xname(sc->sc_dev), qidx);
   5452 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5453 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5454 		if (vih == NULL) {
   5455 			aprint_error_dev(sc->sc_dev,
   5456 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5457 			    intrstr ? " at " : "",
   5458 			    intrstr ? intrstr : "");
   5459 
   5460 			goto fail;
   5461 		}
   5462 		kcpuset_zero(affinity);
   5463 		/* Round-robin affinity */
   5464 		kcpuset_set(affinity, affinity_to);
   5465 		error = interrupt_distribute(vih, affinity, NULL);
   5466 		if (error == 0) {
   5467 			aprint_normal_dev(sc->sc_dev,
   5468 			    "for TX and RX interrupting at %s affinity to %u\n",
   5469 			    intrstr, affinity_to);
   5470 		} else {
   5471 			aprint_normal_dev(sc->sc_dev,
   5472 			    "for TX and RX interrupting at %s\n", intrstr);
   5473 		}
   5474 		sc->sc_ihs[intr_idx] = vih;
   5475 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5476 			goto fail;
   5477 		txrx_established++;
   5478 		intr_idx++;
   5479 	}
   5480 
   5481 	/* LINK */
   5482 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5483 	    sizeof(intrbuf));
   5484 #ifdef WM_MPSAFE
   5485 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5486 #endif
   5487 	memset(intr_xname, 0, sizeof(intr_xname));
   5488 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5489 	    device_xname(sc->sc_dev));
   5490 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5491 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5492 	if (vih == NULL) {
   5493 		aprint_error_dev(sc->sc_dev,
   5494 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5495 		    intrstr ? " at " : "",
   5496 		    intrstr ? intrstr : "");
   5497 
   5498 		goto fail;
   5499 	}
   5500 	/* Keep default affinity to LINK interrupt */
   5501 	aprint_normal_dev(sc->sc_dev,
   5502 	    "for LINK interrupting at %s\n", intrstr);
   5503 	sc->sc_ihs[intr_idx] = vih;
   5504 	sc->sc_link_intr_idx = intr_idx;
   5505 
   5506 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5507 	kcpuset_destroy(affinity);
   5508 	return 0;
   5509 
   5510  fail:
   5511 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5512 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5513 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5514 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5515 	}
   5516 
   5517 	kcpuset_destroy(affinity);
   5518 	return ENOMEM;
   5519 }
   5520 
   5521 static void
   5522 wm_unset_stopping_flags(struct wm_softc *sc)
   5523 {
   5524 	int i;
   5525 
   5526 	KASSERT(WM_CORE_LOCKED(sc));
   5527 
   5528 	/* Must unset stopping flags in ascending order. */
   5529 	for (i = 0; i < sc->sc_nqueues; i++) {
   5530 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5531 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5532 
   5533 		mutex_enter(txq->txq_lock);
   5534 		txq->txq_stopping = false;
   5535 		mutex_exit(txq->txq_lock);
   5536 
   5537 		mutex_enter(rxq->rxq_lock);
   5538 		rxq->rxq_stopping = false;
   5539 		mutex_exit(rxq->rxq_lock);
   5540 	}
   5541 
   5542 	sc->sc_core_stopping = false;
   5543 }
   5544 
   5545 static void
   5546 wm_set_stopping_flags(struct wm_softc *sc)
   5547 {
   5548 	int i;
   5549 
   5550 	KASSERT(WM_CORE_LOCKED(sc));
   5551 
   5552 	sc->sc_core_stopping = true;
   5553 
   5554 	/* Must set stopping flags in ascending order. */
   5555 	for (i = 0; i < sc->sc_nqueues; i++) {
   5556 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5557 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5558 
   5559 		mutex_enter(rxq->rxq_lock);
   5560 		rxq->rxq_stopping = true;
   5561 		mutex_exit(rxq->rxq_lock);
   5562 
   5563 		mutex_enter(txq->txq_lock);
   5564 		txq->txq_stopping = true;
   5565 		mutex_exit(txq->txq_lock);
   5566 	}
   5567 }
   5568 
   5569 /*
   5570  * Write interrupt interval value to ITR or EITR
   5571  */
   5572 static void
   5573 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5574 {
   5575 
   5576 	if (!wmq->wmq_set_itr)
   5577 		return;
   5578 
   5579 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5580 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5581 
   5582 		/*
    5583 		 * The 82575 doesn't have the CNT_INGR field,
    5584 		 * so overwrite the counter field in software.
   5585 		 */
   5586 		if (sc->sc_type == WM_T_82575)
   5587 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5588 		else
   5589 			eitr |= EITR_CNT_INGR;
   5590 
   5591 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5592 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5593 		/*
    5594 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5595 		 * the multiqueue function with MSI-X.
   5596 		 */
   5597 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5598 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5599 	} else {
   5600 		KASSERT(wmq->wmq_id == 0);
   5601 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5602 	}
   5603 
   5604 	wmq->wmq_set_itr = false;
   5605 }
   5606 
   5607 /*
   5608  * TODO
    5609  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5610  * however, it does not fit wm(4), so AIM stays disabled until we find
    5611  * an appropriate ITR calculation.
   5612  */
   5613 /*
    5614  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5615  * write to the register; this function itself does not write ITR/EITR.
   5616  */
   5617 static void
   5618 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5619 {
   5620 #ifdef NOTYET
   5621 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5622 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5623 	uint32_t avg_size = 0;
   5624 	uint32_t new_itr;
   5625 
   5626 	if (rxq->rxq_packets)
   5627 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5628 	if (txq->txq_packets)
   5629 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5630 
   5631 	if (avg_size == 0) {
   5632 		new_itr = 450; /* restore default value */
   5633 		goto out;
   5634 	}
   5635 
   5636 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5637 	avg_size += 24;
   5638 
   5639 	/* Don't starve jumbo frames */
   5640 	avg_size = uimin(avg_size, 3000);
   5641 
   5642 	/* Give a little boost to mid-size frames */
   5643 	if ((avg_size > 300) && (avg_size < 1200))
   5644 		new_itr = avg_size / 3;
   5645 	else
   5646 		new_itr = avg_size / 2;
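         	/* E.g. full-sized 1500-byte frames: (1500 + 24) / 2 = 762. */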
   5647 
   5648 out:
   5649 	/*
    5650 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5651 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5652 	 */
   5653 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5654 		new_itr *= 4;
   5655 
   5656 	if (new_itr != wmq->wmq_itr) {
   5657 		wmq->wmq_itr = new_itr;
   5658 		wmq->wmq_set_itr = true;
   5659 	} else
   5660 		wmq->wmq_set_itr = false;
   5661 
   5662 	rxq->rxq_packets = 0;
   5663 	rxq->rxq_bytes = 0;
   5664 	txq->txq_packets = 0;
   5665 	txq->txq_bytes = 0;
   5666 #endif
   5667 }
   5668 
   5669 /*
   5670  * wm_init:		[ifnet interface function]
   5671  *
   5672  *	Initialize the interface.
   5673  */
   5674 static int
   5675 wm_init(struct ifnet *ifp)
   5676 {
   5677 	struct wm_softc *sc = ifp->if_softc;
   5678 	int ret;
   5679 
   5680 	WM_CORE_LOCK(sc);
   5681 	ret = wm_init_locked(ifp);
   5682 	WM_CORE_UNLOCK(sc);
   5683 
   5684 	return ret;
   5685 }
   5686 
   5687 static int
   5688 wm_init_locked(struct ifnet *ifp)
   5689 {
   5690 	struct wm_softc *sc = ifp->if_softc;
   5691 	struct ethercom *ec = &sc->sc_ethercom;
   5692 	int i, j, trynum, error = 0;
   5693 	uint32_t reg;
   5694 
   5695 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5696 		device_xname(sc->sc_dev), __func__));
   5697 	KASSERT(WM_CORE_LOCKED(sc));
   5698 
   5699 	/*
    5700 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5701 	 * There is a small but measurable benefit to avoiding the adjustment
   5702 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5703 	 * on such platforms.  One possibility is that the DMA itself is
   5704 	 * slightly more efficient if the front of the entire packet (instead
   5705 	 * of the front of the headers) is aligned.
   5706 	 *
   5707 	 * Note we must always set align_tweak to 0 if we are using
   5708 	 * jumbo frames.
   5709 	 */
   5710 #ifdef __NO_STRICT_ALIGNMENT
   5711 	sc->sc_align_tweak = 0;
   5712 #else
   5713 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5714 		sc->sc_align_tweak = 0;
   5715 	else
   5716 		sc->sc_align_tweak = 2;
   5717 #endif /* __NO_STRICT_ALIGNMENT */
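         	/*
         	 * The tweak of 2 plus the 14-byte Ethernet header leaves the IP
         	 * header 4-byte aligned within the receive buffer.
         	 */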
   5718 
   5719 	/* Cancel any pending I/O. */
   5720 	wm_stop_locked(ifp, 0);
   5721 
   5722 	/* Update statistics before reset */
   5723 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5724 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5725 
   5726 	/* PCH_SPT hardware workaround */
   5727 	if (sc->sc_type == WM_T_PCH_SPT)
   5728 		wm_flush_desc_rings(sc);
   5729 
   5730 	/* Reset the chip to a known state. */
   5731 	wm_reset(sc);
   5732 
   5733 	/*
   5734 	 * AMT based hardware can now take control from firmware
   5735 	 * Do this after reset.
   5736 	 */
   5737 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5738 		wm_get_hw_control(sc);
   5739 
   5740 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5741 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5742 		wm_legacy_irq_quirk_spt(sc);
   5743 
   5744 	/* Init hardware bits */
   5745 	wm_initialize_hardware_bits(sc);
   5746 
   5747 	/* Reset the PHY. */
   5748 	if (sc->sc_flags & WM_F_HAS_MII)
   5749 		wm_gmii_reset(sc);
   5750 
   5751 	if (sc->sc_type >= WM_T_ICH8) {
   5752 		reg = CSR_READ(sc, WMREG_GCR);
   5753 		/*
   5754 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5755 		 * default after reset.
   5756 		 */
   5757 		if (sc->sc_type == WM_T_ICH8)
   5758 			reg |= GCR_NO_SNOOP_ALL;
   5759 		else
   5760 			reg &= ~GCR_NO_SNOOP_ALL;
   5761 		CSR_WRITE(sc, WMREG_GCR, reg);
   5762 	}
   5763 	if ((sc->sc_type >= WM_T_ICH8)
   5764 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5765 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5766 
   5767 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5768 		reg |= CTRL_EXT_RO_DIS;
   5769 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5770 	}
   5771 
   5772 	/* Calculate (E)ITR value */
   5773 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5774 		/*
   5775 		 * For NEWQUEUE's EITR (except for 82575).
    5776 		 * The 82575's EITR should be set to the same throttling value
    5777 		 * as the other old controllers' ITR because the interrupt/sec
    5778 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5779 		 *
    5780 		 * The 82574's EITR should be set to the same throttling value
    5781 		 * as ITR.
    5782 		 *
    5783 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5784 		 * contrast to the ITR throttling value.
   5784 		 */
   5785 		sc->sc_itr_init = 450;
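         		/* Check: 1,000,000 / 450 ~= 2222 interrupts/sec. */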
   5786 	} else if (sc->sc_type >= WM_T_82543) {
   5787 		/*
   5788 		 * Set up the interrupt throttling register (units of 256ns)
   5789 		 * Note that a footnote in Intel's documentation says this
   5790 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5791 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5792 		 * that that is also true for the 1024ns units of the other
   5793 		 * interrupt-related timer registers -- so, really, we ought
   5794 		 * to divide this value by 4 when the link speed is low.
   5795 		 *
   5796 		 * XXX implement this division at link speed change!
   5797 		 */
   5798 
   5799 		/*
   5800 		 * For N interrupts/sec, set this value to:
   5801 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5802 		 * absolute and packet timer values to this value
   5803 		 * divided by 4 to get "simple timer" behavior.
   5804 		 */
   5805 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
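         		/* Check: 1,000,000,000 / (2604 * 256) ~= 1500, as above. */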
   5806 	}
   5807 
   5808 	error = wm_init_txrx_queues(sc);
   5809 	if (error)
   5810 		goto out;
   5811 
   5812 	/* Clear out the VLAN table -- we don't use it (yet). */
   5813 	CSR_WRITE(sc, WMREG_VET, 0);
   5814 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5815 		trynum = 10; /* Due to hw errata */
   5816 	else
   5817 		trynum = 1;
   5818 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5819 		for (j = 0; j < trynum; j++)
   5820 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5821 
   5822 	/*
   5823 	 * Set up flow-control parameters.
   5824 	 *
   5825 	 * XXX Values could probably stand some tuning.
   5826 	 */
   5827 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5828 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5829 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5830 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5831 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5832 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5833 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5834 	}
   5835 
   5836 	sc->sc_fcrtl = FCRTL_DFLT;
   5837 	if (sc->sc_type < WM_T_82543) {
   5838 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5839 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5840 	} else {
   5841 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5842 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5843 	}
   5844 
   5845 	if (sc->sc_type == WM_T_80003)
   5846 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5847 	else
   5848 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5849 
   5850 	/* Writes the control register. */
   5851 	wm_set_vlan(sc);
   5852 
   5853 	if (sc->sc_flags & WM_F_HAS_MII) {
   5854 		uint16_t kmreg;
   5855 
   5856 		switch (sc->sc_type) {
   5857 		case WM_T_80003:
   5858 		case WM_T_ICH8:
   5859 		case WM_T_ICH9:
   5860 		case WM_T_ICH10:
   5861 		case WM_T_PCH:
   5862 		case WM_T_PCH2:
   5863 		case WM_T_PCH_LPT:
   5864 		case WM_T_PCH_SPT:
   5865 		case WM_T_PCH_CNP:
   5866 			/*
    5867 			 * Make the MAC wait the maximum time between each
    5868 			 * iteration and increase the max iterations when
   5869 			 * polling the phy; this fixes erroneous timeouts at
   5870 			 * 10Mbps.
   5871 			 */
   5872 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5873 			    0xFFFF);
   5874 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5875 			    &kmreg);
   5876 			kmreg |= 0x3F;
   5877 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5878 			    kmreg);
   5879 			break;
   5880 		default:
   5881 			break;
   5882 		}
   5883 
   5884 		if (sc->sc_type == WM_T_80003) {
   5885 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5886 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5887 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5888 
   5889 			/* Bypass RX and TX FIFO's */
   5890 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5891 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5892 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5893 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5894 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5895 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5896 		}
   5897 	}
   5898 #if 0
   5899 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5900 #endif
   5901 
   5902 	/* Set up checksum offload parameters. */
   5903 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5904 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5905 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5906 		reg |= RXCSUM_IPOFL;
   5907 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5908 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5909 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5910 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5911 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5912 
   5913 	/* Set registers about MSI-X */
   5914 	if (wm_is_using_msix(sc)) {
   5915 		uint32_t ivar;
   5916 		struct wm_queue *wmq;
   5917 		int qid, qintr_idx;
   5918 
   5919 		if (sc->sc_type == WM_T_82575) {
   5920 			/* Interrupt control */
   5921 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5922 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5923 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5924 
   5925 			/* TX and RX */
   5926 			for (i = 0; i < sc->sc_nqueues; i++) {
   5927 				wmq = &sc->sc_queue[i];
   5928 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5929 				    EITR_TX_QUEUE(wmq->wmq_id)
   5930 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5931 			}
   5932 			/* Link status */
   5933 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5934 			    EITR_OTHER);
   5935 		} else if (sc->sc_type == WM_T_82574) {
   5936 			/* Interrupt control */
   5937 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5938 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5939 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5940 
   5941 			/*
    5942 			 * Work around an issue with spurious interrupts
    5943 			 * in MSI-X mode.
    5944 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5945 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5946 			 */
   5947 			reg = CSR_READ(sc, WMREG_RFCTL);
   5948 			reg |= WMREG_RFCTL_ACKDIS;
   5949 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5950 
   5951 			ivar = 0;
   5952 			/* TX and RX */
   5953 			for (i = 0; i < sc->sc_nqueues; i++) {
   5954 				wmq = &sc->sc_queue[i];
   5955 				qid = wmq->wmq_id;
   5956 				qintr_idx = wmq->wmq_intr_idx;
   5957 
   5958 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5959 				    IVAR_TX_MASK_Q_82574(qid));
   5960 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5961 				    IVAR_RX_MASK_Q_82574(qid));
   5962 			}
   5963 			/* Link status */
   5964 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5965 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5966 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5967 		} else {
   5968 			/* Interrupt control */
   5969 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5970 			    | GPIE_EIAME | GPIE_PBA);
   5971 
   5972 			switch (sc->sc_type) {
   5973 			case WM_T_82580:
   5974 			case WM_T_I350:
   5975 			case WM_T_I354:
   5976 			case WM_T_I210:
   5977 			case WM_T_I211:
   5978 				/* TX and RX */
   5979 				for (i = 0; i < sc->sc_nqueues; i++) {
   5980 					wmq = &sc->sc_queue[i];
   5981 					qid = wmq->wmq_id;
   5982 					qintr_idx = wmq->wmq_intr_idx;
   5983 
   5984 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5985 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5986 					ivar |= __SHIFTIN((qintr_idx
   5987 						| IVAR_VALID),
   5988 					    IVAR_TX_MASK_Q(qid));
   5989 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5990 					ivar |= __SHIFTIN((qintr_idx
   5991 						| IVAR_VALID),
   5992 					    IVAR_RX_MASK_Q(qid));
   5993 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5994 				}
   5995 				break;
   5996 			case WM_T_82576:
   5997 				/* TX and RX */
   5998 				for (i = 0; i < sc->sc_nqueues; i++) {
   5999 					wmq = &sc->sc_queue[i];
   6000 					qid = wmq->wmq_id;
   6001 					qintr_idx = wmq->wmq_intr_idx;
   6002 
   6003 					ivar = CSR_READ(sc,
   6004 					    WMREG_IVAR_Q_82576(qid));
   6005 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6006 					ivar |= __SHIFTIN((qintr_idx
   6007 						| IVAR_VALID),
   6008 					    IVAR_TX_MASK_Q_82576(qid));
   6009 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6010 					ivar |= __SHIFTIN((qintr_idx
   6011 						| IVAR_VALID),
   6012 					    IVAR_RX_MASK_Q_82576(qid));
   6013 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6014 					    ivar);
   6015 				}
   6016 				break;
   6017 			default:
   6018 				break;
   6019 			}
   6020 
   6021 			/* Link status */
   6022 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6023 			    IVAR_MISC_OTHER);
   6024 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6025 		}
   6026 
   6027 		if (wm_is_using_multiqueue(sc)) {
   6028 			wm_init_rss(sc);
   6029 
   6030 			/*
    6031 			 * NOTE: Receive Full-Packet Checksum Offload
    6032 			 * is mutually exclusive with Multiqueue. However,
    6033 			 * this is not the same as TCP/IP checksums, which
    6034 			 * still work.
    6035 			 */
   6036 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6037 			reg |= RXCSUM_PCSD;
   6038 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6039 		}
   6040 	}
   6041 
   6042 	/* Set up the interrupt registers. */
   6043 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6044 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6045 	    ICR_RXO | ICR_RXT0;
   6046 	if (wm_is_using_msix(sc)) {
   6047 		uint32_t mask;
   6048 		struct wm_queue *wmq;
   6049 
   6050 		switch (sc->sc_type) {
   6051 		case WM_T_82574:
   6052 			mask = 0;
   6053 			for (i = 0; i < sc->sc_nqueues; i++) {
   6054 				wmq = &sc->sc_queue[i];
   6055 				mask |= ICR_TXQ(wmq->wmq_id);
   6056 				mask |= ICR_RXQ(wmq->wmq_id);
   6057 			}
   6058 			mask |= ICR_OTHER;
   6059 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6060 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6061 			break;
   6062 		default:
   6063 			if (sc->sc_type == WM_T_82575) {
   6064 				mask = 0;
   6065 				for (i = 0; i < sc->sc_nqueues; i++) {
   6066 					wmq = &sc->sc_queue[i];
   6067 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6068 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6069 				}
   6070 				mask |= EITR_OTHER;
   6071 			} else {
   6072 				mask = 0;
   6073 				for (i = 0; i < sc->sc_nqueues; i++) {
   6074 					wmq = &sc->sc_queue[i];
   6075 					mask |= 1 << wmq->wmq_intr_idx;
   6076 				}
   6077 				mask |= 1 << sc->sc_link_intr_idx;
   6078 			}
   6079 			CSR_WRITE(sc, WMREG_EIAC, mask);
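         			/*
         			 * Program the same vector mask into auto-clear
         			 * (EIAC), auto-mask (EIAM) and enable (EIMS).
         			 */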
   6080 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6081 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6082 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6083 			break;
   6084 		}
   6085 	} else
   6086 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6087 
   6088 	/* Set up the inter-packet gap. */
   6089 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6090 
   6091 	if (sc->sc_type >= WM_T_82543) {
   6092 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6093 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6094 			wm_itrs_writereg(sc, wmq);
   6095 		}
   6096 		/*
    6097 		 * Link interrupts occur much less frequently than
    6098 		 * TX and RX interrupts, so we don't
   6099 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6100 		 * FreeBSD's if_igb.
   6101 		 */
   6102 	}
   6103 
    6104 	/* Set the VLAN Ethertype. */
   6105 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6106 
   6107 	/*
   6108 	 * Set up the transmit control register; we start out with
    6109 	 * a collision distance suitable for FDX, but update it when
   6110 	 * we resolve the media type.
   6111 	 */
   6112 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6113 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6114 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6115 	if (sc->sc_type >= WM_T_82571)
   6116 		sc->sc_tctl |= TCTL_MULR;
   6117 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6118 
   6119 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6120 		/* Write TDT after TCTL.EN is set. See the document. */
   6121 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6122 	}
   6123 
   6124 	if (sc->sc_type == WM_T_80003) {
   6125 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6126 		reg &= ~TCTL_EXT_GCEX_MASK;
   6127 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6128 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6129 	}
   6130 
   6131 	/* Set the media. */
   6132 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6133 		goto out;
   6134 
   6135 	/* Configure for OS presence */
   6136 	wm_init_manageability(sc);
   6137 
   6138 	/*
   6139 	 * Set up the receive control register; we actually program the
   6140 	 * register when we set the receive filter. Use multicast address
   6141 	 * offset type 0.
   6142 	 *
   6143 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6144 	 * don't enable that feature.
   6145 	 */
   6146 	sc->sc_mchash_type = 0;
   6147 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6148 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6149 
   6150 	/* 82574 use one buffer extended Rx descriptor. */
   6151 	if (sc->sc_type == WM_T_82574)
   6152 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6153 
   6154 	/*
   6155 	 * The I350 has a bug where it always strips the CRC whether
    6156 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6157 	 */
   6158 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6159 	    || (sc->sc_type == WM_T_I210))
   6160 		sc->sc_rctl |= RCTL_SECRC;
   6161 
   6162 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6163 	    && (ifp->if_mtu > ETHERMTU)) {
   6164 		sc->sc_rctl |= RCTL_LPE;
   6165 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6166 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6167 	}
   6168 
   6169 	if (MCLBYTES == 2048)
   6170 		sc->sc_rctl |= RCTL_2k;
   6171 	else {
   6172 		if (sc->sc_type >= WM_T_82543) {
   6173 			switch (MCLBYTES) {
   6174 			case 4096:
   6175 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6176 				break;
   6177 			case 8192:
   6178 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6179 				break;
   6180 			case 16384:
   6181 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6182 				break;
   6183 			default:
   6184 				panic("wm_init: MCLBYTES %d unsupported",
   6185 				    MCLBYTES);
   6186 				break;
   6187 			}
   6188 		} else
   6189 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6190 	}
   6191 
   6192 	/* Enable ECC */
   6193 	switch (sc->sc_type) {
   6194 	case WM_T_82571:
   6195 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6196 		reg |= PBA_ECC_CORR_EN;
   6197 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6198 		break;
   6199 	case WM_T_PCH_LPT:
   6200 	case WM_T_PCH_SPT:
   6201 	case WM_T_PCH_CNP:
   6202 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6203 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6204 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6205 
   6206 		sc->sc_ctrl |= CTRL_MEHE;
   6207 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6208 		break;
   6209 	default:
   6210 		break;
   6211 	}
   6212 
   6213 	/*
   6214 	 * Set the receive filter.
   6215 	 *
   6216 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6217 	 * the setting of RCTL.EN in wm_set_filter()
   6218 	 */
   6219 	wm_set_filter(sc);
   6220 
6221 	/* On 82575 and later, set RDT only if RX is enabled. */
   6222 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6223 		int qidx;
   6224 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6225 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6226 			for (i = 0; i < WM_NRXDESC; i++) {
   6227 				mutex_enter(rxq->rxq_lock);
   6228 				wm_init_rxdesc(rxq, i);
   6229 				mutex_exit(rxq->rxq_lock);
6231 			}
   6232 		}
   6233 	}
   6234 
   6235 	wm_unset_stopping_flags(sc);
   6236 
   6237 	/* Start the one second link check clock. */
   6238 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6239 
   6240 	/* ...all done! */
   6241 	ifp->if_flags |= IFF_RUNNING;
   6242 	ifp->if_flags &= ~IFF_OACTIVE;
   6243 
   6244  out:
   6245 	/* Save last flags for the callback */
   6246 	sc->sc_if_flags = ifp->if_flags;
   6247 	sc->sc_ec_capenable = ec->ec_capenable;
   6248 	if (error)
   6249 		log(LOG_ERR, "%s: interface not running\n",
   6250 		    device_xname(sc->sc_dev));
   6251 	return error;
   6252 }
   6253 
   6254 /*
   6255  * wm_stop:		[ifnet interface function]
   6256  *
   6257  *	Stop transmission on the interface.
   6258  */
   6259 static void
   6260 wm_stop(struct ifnet *ifp, int disable)
   6261 {
   6262 	struct wm_softc *sc = ifp->if_softc;
   6263 
   6264 	WM_CORE_LOCK(sc);
   6265 	wm_stop_locked(ifp, disable);
   6266 	WM_CORE_UNLOCK(sc);
   6267 }
   6268 
   6269 static void
   6270 wm_stop_locked(struct ifnet *ifp, int disable)
   6271 {
   6272 	struct wm_softc *sc = ifp->if_softc;
   6273 	struct wm_txsoft *txs;
   6274 	int i, qidx;
   6275 
   6276 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6277 		device_xname(sc->sc_dev), __func__));
   6278 	KASSERT(WM_CORE_LOCKED(sc));
   6279 
   6280 	wm_set_stopping_flags(sc);
   6281 
   6282 	/* Stop the one second clock. */
   6283 	callout_stop(&sc->sc_tick_ch);
   6284 
   6285 	/* Stop the 82547 Tx FIFO stall check timer. */
   6286 	if (sc->sc_type == WM_T_82547)
   6287 		callout_stop(&sc->sc_txfifo_ch);
   6288 
   6289 	if (sc->sc_flags & WM_F_HAS_MII) {
   6290 		/* Down the MII. */
   6291 		mii_down(&sc->sc_mii);
   6292 	} else {
   6293 #if 0
   6294 		/* Should we clear PHY's status properly? */
   6295 		wm_reset(sc);
   6296 #endif
   6297 	}
   6298 
   6299 	/* Stop the transmit and receive processes. */
   6300 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6301 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6302 	sc->sc_rctl &= ~RCTL_EN;
   6303 
   6304 	/*
   6305 	 * Clear the interrupt mask to ensure the device cannot assert its
   6306 	 * interrupt line.
   6307 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6308 	 * service any currently pending or shared interrupt.
   6309 	 */
   6310 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6311 	sc->sc_icr = 0;
   6312 	if (wm_is_using_msix(sc)) {
   6313 		if (sc->sc_type != WM_T_82574) {
   6314 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6315 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6316 		} else
   6317 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6318 	}
   6319 
   6320 	/* Release any queued transmit buffers. */
   6321 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6322 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6323 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6324 		mutex_enter(txq->txq_lock);
   6325 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6326 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6327 			txs = &txq->txq_soft[i];
   6328 			if (txs->txs_mbuf != NULL) {
6329 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6330 				m_freem(txs->txs_mbuf);
   6331 				txs->txs_mbuf = NULL;
   6332 			}
   6333 		}
   6334 		mutex_exit(txq->txq_lock);
   6335 	}
   6336 
   6337 	/* Mark the interface as down and cancel the watchdog timer. */
   6338 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6339 
   6340 	if (disable) {
   6341 		for (i = 0; i < sc->sc_nqueues; i++) {
   6342 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6343 			mutex_enter(rxq->rxq_lock);
   6344 			wm_rxdrain(rxq);
   6345 			mutex_exit(rxq->rxq_lock);
   6346 		}
   6347 	}
   6348 
   6349 #if 0 /* notyet */
   6350 	if (sc->sc_type >= WM_T_82544)
   6351 		CSR_WRITE(sc, WMREG_WUC, 0);
   6352 #endif
   6353 }
   6354 
   6355 static void
   6356 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6357 {
   6358 	struct mbuf *m;
   6359 	int i;
   6360 
   6361 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6362 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6363 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6364 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6365 		    m->m_data, m->m_len, m->m_flags);
   6366 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6367 	    i, i == 1 ? "" : "s");
   6368 }
   6369 
   6370 /*
   6371  * wm_82547_txfifo_stall:
   6372  *
   6373  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6374  *	reset the FIFO pointers, and restart packet transmission.
   6375  */
   6376 static void
   6377 wm_82547_txfifo_stall(void *arg)
   6378 {
   6379 	struct wm_softc *sc = arg;
   6380 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6381 
   6382 	mutex_enter(txq->txq_lock);
   6383 
   6384 	if (txq->txq_stopping)
   6385 		goto out;
   6386 
   6387 	if (txq->txq_fifo_stall) {
   6388 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6389 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6390 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6391 			/*
   6392 			 * Packets have drained.  Stop transmitter, reset
   6393 			 * FIFO pointers, restart transmitter, and kick
   6394 			 * the packet queue.
   6395 			 */
   6396 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6397 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6398 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6399 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6400 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6401 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6402 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6403 			CSR_WRITE_FLUSH(sc);
   6404 
   6405 			txq->txq_fifo_head = 0;
   6406 			txq->txq_fifo_stall = 0;
   6407 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6408 		} else {
   6409 			/*
   6410 			 * Still waiting for packets to drain; try again in
   6411 			 * another tick.
   6412 			 */
   6413 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6414 		}
   6415 	}
   6416 
   6417 out:
   6418 	mutex_exit(txq->txq_lock);
   6419 }
   6420 
   6421 /*
   6422  * wm_82547_txfifo_bugchk:
   6423  *
   6424  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6425  *	prevent enqueueing a packet that would wrap around the end
6426  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6427  *
   6428  *	We do this by checking the amount of space before the end
   6429  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6430  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6431  *	the internal FIFO pointers to the beginning, and restart
   6432  *	transmission on the interface.
   6433  */
   6434 #define	WM_FIFO_HDR		0x10
   6435 #define	WM_82547_PAD_LEN	0x3e0
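/*
 * A worked example with hypothetical numbers: a 1500-byte packet rounds
 * up to len = roundup(1500 + 0x10, 0x10) = 0x5f0 bytes of FIFO space.
 * With 0x400 bytes left before the end of the FIFO,
 * 0x5f0 >= 0x3e0 + 0x400 is false and the packet is sent; with only
 * 0x200 bytes left, the check is true and the stall path is taken.
 */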
   6436 static int
   6437 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6438 {
   6439 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6440 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6441 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6442 
   6443 	/* Just return if already stalled. */
   6444 	if (txq->txq_fifo_stall)
   6445 		return 1;
   6446 
   6447 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6448 		/* Stall only occurs in half-duplex mode. */
   6449 		goto send_packet;
   6450 	}
   6451 
   6452 	if (len >= WM_82547_PAD_LEN + space) {
   6453 		txq->txq_fifo_stall = 1;
   6454 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6455 		return 1;
   6456 	}
   6457 
   6458  send_packet:
   6459 	txq->txq_fifo_head += len;
   6460 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6461 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6462 
   6463 	return 0;
   6464 }
   6465 
   6466 static int
   6467 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6468 {
   6469 	int error;
   6470 
   6471 	/*
   6472 	 * Allocate the control data structures, and create and load the
   6473 	 * DMA map for it.
   6474 	 *
   6475 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6476 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6477 	 * both sets within the same 4G segment.
   6478 	 */
   6479 	if (sc->sc_type < WM_T_82544)
   6480 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6481 	else
   6482 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6483 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6484 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6485 	else
   6486 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6487 
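	/*
	 * The boundary argument (0x100000000ULL) to bus_dmamem_alloc()
	 * below prevents the descriptor ring from crossing a 4GB
	 * boundary, satisfying the NOTE above.
	 */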
   6488 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6489 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6490 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6491 		aprint_error_dev(sc->sc_dev,
   6492 		    "unable to allocate TX control data, error = %d\n",
   6493 		    error);
   6494 		goto fail_0;
   6495 	}
   6496 
   6497 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6498 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6499 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6500 		aprint_error_dev(sc->sc_dev,
   6501 		    "unable to map TX control data, error = %d\n", error);
   6502 		goto fail_1;
   6503 	}
   6504 
   6505 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6506 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6507 		aprint_error_dev(sc->sc_dev,
   6508 		    "unable to create TX control data DMA map, error = %d\n",
   6509 		    error);
   6510 		goto fail_2;
   6511 	}
   6512 
   6513 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6514 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6515 		aprint_error_dev(sc->sc_dev,
   6516 		    "unable to load TX control data DMA map, error = %d\n",
   6517 		    error);
   6518 		goto fail_3;
   6519 	}
   6520 
   6521 	return 0;
   6522 
   6523  fail_3:
   6524 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6525  fail_2:
   6526 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6527 	    WM_TXDESCS_SIZE(txq));
   6528  fail_1:
   6529 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6530  fail_0:
   6531 	return error;
   6532 }
   6533 
   6534 static void
   6535 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6536 {
   6537 
   6538 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6539 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6540 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6541 	    WM_TXDESCS_SIZE(txq));
   6542 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6543 }
   6544 
   6545 static int
   6546 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6547 {
   6548 	int error;
   6549 	size_t rxq_descs_size;
   6550 
   6551 	/*
   6552 	 * Allocate the control data structures, and create and load the
   6553 	 * DMA map for it.
   6554 	 *
   6555 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6556 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6557 	 * both sets within the same 4G segment.
   6558 	 */
   6559 	rxq->rxq_ndesc = WM_NRXDESC;
   6560 	if (sc->sc_type == WM_T_82574)
   6561 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6562 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6563 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6564 	else
   6565 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6566 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6567 
   6568 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6569 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6570 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6571 		aprint_error_dev(sc->sc_dev,
   6572 		    "unable to allocate RX control data, error = %d\n",
   6573 		    error);
   6574 		goto fail_0;
   6575 	}
   6576 
   6577 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6578 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6579 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6580 		aprint_error_dev(sc->sc_dev,
   6581 		    "unable to map RX control data, error = %d\n", error);
   6582 		goto fail_1;
   6583 	}
   6584 
   6585 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6586 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6587 		aprint_error_dev(sc->sc_dev,
   6588 		    "unable to create RX control data DMA map, error = %d\n",
   6589 		    error);
   6590 		goto fail_2;
   6591 	}
   6592 
   6593 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6594 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6595 		aprint_error_dev(sc->sc_dev,
   6596 		    "unable to load RX control data DMA map, error = %d\n",
   6597 		    error);
   6598 		goto fail_3;
   6599 	}
   6600 
   6601 	return 0;
   6602 
   6603  fail_3:
   6604 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6605  fail_2:
   6606 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6607 	    rxq_descs_size);
   6608  fail_1:
   6609 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6610  fail_0:
   6611 	return error;
   6612 }
   6613 
   6614 static void
   6615 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6616 {
   6617 
   6618 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6619 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6620 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6621 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6622 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6623 }
   6624 
   6625 
   6626 static int
   6627 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6628 {
   6629 	int i, error;
   6630 
   6631 	/* Create the transmit buffer DMA maps. */
   6632 	WM_TXQUEUELEN(txq) =
   6633 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6634 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
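	/*
	 * Each per-job DMA map below allows up to WM_NTXSEGS segments,
	 * caps each segment at WTX_MAX_LEN (the hardware's per-descriptor
	 * length limit) and the whole mapping at WM_MAXTXDMA bytes.
	 */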
   6635 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6636 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6637 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6638 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6639 			aprint_error_dev(sc->sc_dev,
   6640 			    "unable to create Tx DMA map %d, error = %d\n",
   6641 			    i, error);
   6642 			goto fail;
   6643 		}
   6644 	}
   6645 
   6646 	return 0;
   6647 
   6648  fail:
   6649 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6650 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6651 			bus_dmamap_destroy(sc->sc_dmat,
   6652 			    txq->txq_soft[i].txs_dmamap);
   6653 	}
   6654 	return error;
   6655 }
   6656 
   6657 static void
   6658 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6659 {
   6660 	int i;
   6661 
   6662 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6663 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6664 			bus_dmamap_destroy(sc->sc_dmat,
   6665 			    txq->txq_soft[i].txs_dmamap);
   6666 	}
   6667 }
   6668 
   6669 static int
   6670 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6671 {
   6672 	int i, error;
   6673 
   6674 	/* Create the receive buffer DMA maps. */
   6675 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6676 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6677 			    MCLBYTES, 0, 0,
   6678 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6679 			aprint_error_dev(sc->sc_dev,
   6680 			    "unable to create Rx DMA map %d error = %d\n",
   6681 			    i, error);
   6682 			goto fail;
   6683 		}
   6684 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6685 	}
   6686 
   6687 	return 0;
   6688 
   6689  fail:
   6690 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6691 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6692 			bus_dmamap_destroy(sc->sc_dmat,
   6693 			    rxq->rxq_soft[i].rxs_dmamap);
   6694 	}
   6695 	return error;
   6696 }
   6697 
   6698 static void
   6699 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6700 {
   6701 	int i;
   6702 
   6703 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6704 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6705 			bus_dmamap_destroy(sc->sc_dmat,
   6706 			    rxq->rxq_soft[i].rxs_dmamap);
   6707 	}
   6708 }
   6709 
   6710 /*
6711  * wm_alloc_txrx_queues:
6712  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6713  */
   6714 static int
   6715 wm_alloc_txrx_queues(struct wm_softc *sc)
   6716 {
   6717 	int i, error, tx_done, rx_done;
   6718 
   6719 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6720 	    KM_SLEEP);
   6721 	if (sc->sc_queue == NULL) {
6722 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6723 		error = ENOMEM;
   6724 		goto fail_0;
   6725 	}
   6726 
   6727 	/* For transmission */
   6728 	error = 0;
   6729 	tx_done = 0;
   6730 	for (i = 0; i < sc->sc_nqueues; i++) {
   6731 #ifdef WM_EVENT_COUNTERS
   6732 		int j;
   6733 		const char *xname;
   6734 #endif
   6735 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6736 		txq->txq_sc = sc;
   6737 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6738 
   6739 		error = wm_alloc_tx_descs(sc, txq);
   6740 		if (error)
   6741 			break;
   6742 		error = wm_alloc_tx_buffer(sc, txq);
   6743 		if (error) {
   6744 			wm_free_tx_descs(sc, txq);
   6745 			break;
   6746 		}
   6747 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6748 		if (txq->txq_interq == NULL) {
   6749 			wm_free_tx_descs(sc, txq);
   6750 			wm_free_tx_buffer(sc, txq);
   6751 			error = ENOMEM;
   6752 			break;
   6753 		}
   6754 
   6755 #ifdef WM_EVENT_COUNTERS
   6756 		xname = device_xname(sc->sc_dev);
   6757 
   6758 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6759 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6760 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6761 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6762 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6769 
   6770 		for (j = 0; j < WM_NTXSEGS; j++) {
   6771 			snprintf(txq->txq_txseg_evcnt_names[j],
   6772 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6773 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6774 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6775 		}
   6776 
   6777 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6778 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6779 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6782 #endif /* WM_EVENT_COUNTERS */
   6783 
   6784 		tx_done++;
   6785 	}
   6786 	if (error)
   6787 		goto fail_1;
   6788 
6789 	/* For receive */
   6790 	error = 0;
   6791 	rx_done = 0;
   6792 	for (i = 0; i < sc->sc_nqueues; i++) {
   6793 #ifdef WM_EVENT_COUNTERS
   6794 		const char *xname;
   6795 #endif
   6796 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6797 		rxq->rxq_sc = sc;
   6798 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6799 
   6800 		error = wm_alloc_rx_descs(sc, rxq);
   6801 		if (error)
   6802 			break;
   6803 
   6804 		error = wm_alloc_rx_buffer(sc, rxq);
   6805 		if (error) {
   6806 			wm_free_rx_descs(sc, rxq);
   6807 			break;
   6808 		}
   6809 
   6810 #ifdef WM_EVENT_COUNTERS
   6811 		xname = device_xname(sc->sc_dev);
   6812 
   6813 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6814 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6815 
   6816 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6817 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6818 #endif /* WM_EVENT_COUNTERS */
   6819 
   6820 		rx_done++;
   6821 	}
   6822 	if (error)
   6823 		goto fail_2;
   6824 
   6825 	return 0;
   6826 
   6827  fail_2:
   6828 	for (i = 0; i < rx_done; i++) {
   6829 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6830 		wm_free_rx_buffer(sc, rxq);
   6831 		wm_free_rx_descs(sc, rxq);
   6832 		if (rxq->rxq_lock)
   6833 			mutex_obj_free(rxq->rxq_lock);
   6834 	}
   6835  fail_1:
   6836 	for (i = 0; i < tx_done; i++) {
   6837 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6838 		pcq_destroy(txq->txq_interq);
   6839 		wm_free_tx_buffer(sc, txq);
   6840 		wm_free_tx_descs(sc, txq);
   6841 		if (txq->txq_lock)
   6842 			mutex_obj_free(txq->txq_lock);
   6843 	}
   6844 
   6845 	kmem_free(sc->sc_queue,
   6846 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6847  fail_0:
   6848 	return error;
   6849 }
   6850 
   6851 /*
6852  * wm_free_txrx_queues:
6853  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6854  */
   6855 static void
   6856 wm_free_txrx_queues(struct wm_softc *sc)
   6857 {
   6858 	int i;
   6859 
   6860 	for (i = 0; i < sc->sc_nqueues; i++) {
   6861 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6862 
   6863 #ifdef WM_EVENT_COUNTERS
   6864 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6865 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6866 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6867 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6868 #endif /* WM_EVENT_COUNTERS */
   6869 
   6870 		wm_free_rx_buffer(sc, rxq);
   6871 		wm_free_rx_descs(sc, rxq);
   6872 		if (rxq->rxq_lock)
   6873 			mutex_obj_free(rxq->rxq_lock);
   6874 	}
   6875 
   6876 	for (i = 0; i < sc->sc_nqueues; i++) {
   6877 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6878 		struct mbuf *m;
   6879 #ifdef WM_EVENT_COUNTERS
   6880 		int j;
   6881 
   6882 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6884 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6885 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6886 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6887 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6893 
   6894 		for (j = 0; j < WM_NTXSEGS; j++)
   6895 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6896 
   6897 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6898 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6899 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6900 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6902 #endif /* WM_EVENT_COUNTERS */
   6903 
   6904 		/* Drain txq_interq */
   6905 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6906 			m_freem(m);
   6907 		pcq_destroy(txq->txq_interq);
   6908 
   6909 		wm_free_tx_buffer(sc, txq);
   6910 		wm_free_tx_descs(sc, txq);
   6911 		if (txq->txq_lock)
   6912 			mutex_obj_free(txq->txq_lock);
   6913 	}
   6914 
   6915 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6916 }
   6917 
   6918 static void
   6919 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6920 {
   6921 
   6922 	KASSERT(mutex_owned(txq->txq_lock));
   6923 
   6924 	/* Initialize the transmit descriptor ring. */
   6925 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6926 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6927 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6928 	txq->txq_free = WM_NTXDESC(txq);
   6929 	txq->txq_next = 0;
   6930 }
   6931 
   6932 static void
   6933 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6934     struct wm_txqueue *txq)
   6935 {
   6936 
   6937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6938 		device_xname(sc->sc_dev), __func__));
   6939 	KASSERT(mutex_owned(txq->txq_lock));
   6940 
   6941 	if (sc->sc_type < WM_T_82543) {
   6942 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6943 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6944 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6945 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6946 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6947 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6948 	} else {
   6949 		int qid = wmq->wmq_id;
   6950 
   6951 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6952 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6953 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6954 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6955 
   6956 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6957 			/*
   6958 			 * Don't write TDT before TCTL.EN is set.
6959 			 * See the documentation.
   6960 			 */
   6961 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6962 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6963 			    | TXDCTL_WTHRESH(0));
   6964 		else {
   6965 			/* XXX should update with AIM? */
   6966 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6967 			if (sc->sc_type >= WM_T_82540) {
   6968 				/* Should be the same */
   6969 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6970 			}
   6971 
   6972 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6973 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6974 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6975 		}
   6976 	}
   6977 }
   6978 
   6979 static void
   6980 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6981 {
   6982 	int i;
   6983 
   6984 	KASSERT(mutex_owned(txq->txq_lock));
   6985 
   6986 	/* Initialize the transmit job descriptors. */
   6987 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6988 		txq->txq_soft[i].txs_mbuf = NULL;
   6989 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6990 	txq->txq_snext = 0;
   6991 	txq->txq_sdirty = 0;
   6992 }
   6993 
   6994 static void
   6995 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6996     struct wm_txqueue *txq)
   6997 {
   6998 
   6999 	KASSERT(mutex_owned(txq->txq_lock));
   7000 
   7001 	/*
   7002 	 * Set up some register offsets that are different between
   7003 	 * the i82542 and the i82543 and later chips.
   7004 	 */
   7005 	if (sc->sc_type < WM_T_82543)
   7006 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7007 	else
   7008 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7009 
   7010 	wm_init_tx_descs(sc, txq);
   7011 	wm_init_tx_regs(sc, wmq, txq);
   7012 	wm_init_tx_buffer(sc, txq);
   7013 
   7014 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7015 	txq->txq_sending = false;
   7016 }
   7017 
   7018 static void
   7019 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7020     struct wm_rxqueue *rxq)
   7021 {
   7022 
   7023 	KASSERT(mutex_owned(rxq->rxq_lock));
   7024 
   7025 	/*
   7026 	 * Initialize the receive descriptor and receive job
   7027 	 * descriptor rings.
   7028 	 */
   7029 	if (sc->sc_type < WM_T_82543) {
   7030 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7031 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7032 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7033 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7034 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7035 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7036 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7037 
   7038 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7042 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7043 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7044 	} else {
   7045 		int qid = wmq->wmq_id;
   7046 
   7047 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7048 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7049 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7050 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7051 
   7052 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7053 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7054 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7055 
7056 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7057 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7058 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7059 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7060 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7061 			    | RXDCTL_WTHRESH(1));
   7062 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7063 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7064 		} else {
   7065 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7066 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7067 			/* XXX should update with AIM? */
   7068 			CSR_WRITE(sc, WMREG_RDTR,
   7069 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7070 			/* MUST be the same */
   7071 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7072 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7073 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7074 		}
   7075 	}
   7076 }
   7077 
   7078 static int
   7079 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7080 {
   7081 	struct wm_rxsoft *rxs;
   7082 	int error, i;
   7083 
   7084 	KASSERT(mutex_owned(rxq->rxq_lock));
   7085 
   7086 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7087 		rxs = &rxq->rxq_soft[i];
   7088 		if (rxs->rxs_mbuf == NULL) {
   7089 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7090 				log(LOG_ERR, "%s: unable to allocate or map "
   7091 				    "rx buffer %d, error = %d\n",
   7092 				    device_xname(sc->sc_dev), i, error);
   7093 				/*
   7094 				 * XXX Should attempt to run with fewer receive
   7095 				 * XXX buffers instead of just failing.
   7096 				 */
   7097 				wm_rxdrain(rxq);
   7098 				return ENOMEM;
   7099 			}
   7100 		} else {
   7101 			/*
   7102 			 * For 82575 and 82576, the RX descriptors must be
   7103 			 * initialized after the setting of RCTL.EN in
   7104 			 * wm_set_filter()
   7105 			 */
   7106 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7107 				wm_init_rxdesc(rxq, i);
   7108 		}
   7109 	}
   7110 	rxq->rxq_ptr = 0;
   7111 	rxq->rxq_discard = 0;
   7112 	WM_RXCHAIN_RESET(rxq);
   7113 
   7114 	return 0;
   7115 }
   7116 
   7117 static int
   7118 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7119     struct wm_rxqueue *rxq)
   7120 {
   7121 
   7122 	KASSERT(mutex_owned(rxq->rxq_lock));
   7123 
   7124 	/*
   7125 	 * Set up some register offsets that are different between
   7126 	 * the i82542 and the i82543 and later chips.
   7127 	 */
   7128 	if (sc->sc_type < WM_T_82543)
   7129 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7130 	else
   7131 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7132 
   7133 	wm_init_rx_regs(sc, wmq, rxq);
   7134 	return wm_init_rx_buffer(sc, rxq);
   7135 }
   7136 
   7137 /*
7138  * wm_init_txrx_queues:
7139  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7140  */
   7141 static int
   7142 wm_init_txrx_queues(struct wm_softc *sc)
   7143 {
   7144 	int i, error = 0;
   7145 
   7146 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7147 		device_xname(sc->sc_dev), __func__));
   7148 
   7149 	for (i = 0; i < sc->sc_nqueues; i++) {
   7150 		struct wm_queue *wmq = &sc->sc_queue[i];
   7151 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7152 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7153 
   7154 		/*
   7155 		 * TODO
7156 		 * Currently, we use a constant value instead of AIM
7157 		 * (Adaptive Interrupt Moderation). Furthermore, the
7158 		 * interrupt interval for multiqueue (polling mode) is
7159 		 * lower than the default. More tuning and AIM are required.
   7160 		 */
   7161 		if (wm_is_using_multiqueue(sc))
   7162 			wmq->wmq_itr = 50;
   7163 		else
   7164 			wmq->wmq_itr = sc->sc_itr_init;
   7165 		wmq->wmq_set_itr = true;
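		/*
		 * wmq_itr is an interrupt moderation value; its exact unit
		 * depends on the chip family.  wm_itrs_writereg() converts
		 * it to the proper (E)ITR register format, and the legacy
		 * path programs TIDV/RDTR from wmq_itr / 4.
		 */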
   7166 
   7167 		mutex_enter(txq->txq_lock);
   7168 		wm_init_tx_queue(sc, wmq, txq);
   7169 		mutex_exit(txq->txq_lock);
   7170 
   7171 		mutex_enter(rxq->rxq_lock);
   7172 		error = wm_init_rx_queue(sc, wmq, rxq);
   7173 		mutex_exit(rxq->rxq_lock);
   7174 		if (error)
   7175 			break;
   7176 	}
   7177 
   7178 	return error;
   7179 }
   7180 
   7181 /*
   7182  * wm_tx_offload:
   7183  *
   7184  *	Set up TCP/IP checksumming parameters for the
   7185  *	specified packet.
   7186  */
   7187 static int
   7188 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7189     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7190 {
   7191 	struct mbuf *m0 = txs->txs_mbuf;
   7192 	struct livengood_tcpip_ctxdesc *t;
   7193 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7194 	uint32_t ipcse;
   7195 	struct ether_header *eh;
   7196 	int offset, iphl;
   7197 	uint8_t fields;
   7198 
   7199 	/*
   7200 	 * XXX It would be nice if the mbuf pkthdr had offset
   7201 	 * fields for the protocol headers.
   7202 	 */
   7203 
   7204 	eh = mtod(m0, struct ether_header *);
   7205 	switch (htons(eh->ether_type)) {
   7206 	case ETHERTYPE_IP:
   7207 	case ETHERTYPE_IPV6:
   7208 		offset = ETHER_HDR_LEN;
   7209 		break;
   7210 
   7211 	case ETHERTYPE_VLAN:
   7212 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7213 		break;
   7214 
   7215 	default:
   7216 		/* Don't support this protocol or encapsulation. */
   7217 		*fieldsp = 0;
   7218 		*cmdp = 0;
   7219 		return 0;
   7220 	}
   7221 
   7222 	if ((m0->m_pkthdr.csum_flags &
   7223 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7224 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7225 	} else
   7226 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7227 
   7228 	ipcse = offset + iphl - 1;
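	/*
	 * For example, an untagged IPv4 frame with a 20-byte IP header
	 * has offset = 14 (ETHER_HDR_LEN) and iphl = 20, so ipcse = 33:
	 * the IP checksum covers bytes 14..33 of the frame, and IPCSO
	 * (set below) is 14 + 10 = 24, the offset of ip_sum.
	 */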
   7229 
   7230 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7231 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7232 	seg = 0;
   7233 	fields = 0;
   7234 
   7235 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7236 		int hlen = offset + iphl;
   7237 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7238 
   7239 		if (__predict_false(m0->m_len <
   7240 				    (hlen + sizeof(struct tcphdr)))) {
   7241 			/*
   7242 			 * TCP/IP headers are not in the first mbuf; we need
   7243 			 * to do this the slow and painful way. Let's just
   7244 			 * hope this doesn't happen very often.
   7245 			 */
   7246 			struct tcphdr th;
   7247 
   7248 			WM_Q_EVCNT_INCR(txq, tsopain);
   7249 
   7250 			m_copydata(m0, hlen, sizeof(th), &th);
   7251 			if (v4) {
   7252 				struct ip ip;
   7253 
   7254 				m_copydata(m0, offset, sizeof(ip), &ip);
   7255 				ip.ip_len = 0;
   7256 				m_copyback(m0,
   7257 				    offset + offsetof(struct ip, ip_len),
   7258 				    sizeof(ip.ip_len), &ip.ip_len);
   7259 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7260 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7261 			} else {
   7262 				struct ip6_hdr ip6;
   7263 
   7264 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7265 				ip6.ip6_plen = 0;
   7266 				m_copyback(m0,
   7267 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7268 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7269 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7270 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7271 			}
   7272 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7273 			    sizeof(th.th_sum), &th.th_sum);
   7274 
   7275 			hlen += th.th_off << 2;
   7276 		} else {
   7277 			/*
   7278 			 * TCP/IP headers are in the first mbuf; we can do
   7279 			 * this the easy way.
   7280 			 */
   7281 			struct tcphdr *th;
   7282 
   7283 			if (v4) {
   7284 				struct ip *ip =
   7285 				    (void *)(mtod(m0, char *) + offset);
   7286 				th = (void *)(mtod(m0, char *) + hlen);
   7287 
   7288 				ip->ip_len = 0;
   7289 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7290 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7291 			} else {
   7292 				struct ip6_hdr *ip6 =
   7293 				    (void *)(mtod(m0, char *) + offset);
   7294 				th = (void *)(mtod(m0, char *) + hlen);
   7295 
   7296 				ip6->ip6_plen = 0;
   7297 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7298 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7299 			}
   7300 			hlen += th->th_off << 2;
   7301 		}
   7302 
   7303 		if (v4) {
   7304 			WM_Q_EVCNT_INCR(txq, tso);
   7305 			cmdlen |= WTX_TCPIP_CMD_IP;
   7306 		} else {
   7307 			WM_Q_EVCNT_INCR(txq, tso6);
   7308 			ipcse = 0;
   7309 		}
   7310 		cmd |= WTX_TCPIP_CMD_TSE;
   7311 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7312 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7313 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7314 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7315 	}
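	/*
	 * In both branches above, the IP total-length field is zeroed and
	 * th_sum is seeded with a pseudo-header checksum that omits the
	 * length, because the controller fills in per-segment lengths and
	 * completes the checksum for each TSO segment.
	 */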
   7316 
   7317 	/*
   7318 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7319 	 * offload feature, if we load the context descriptor, we
   7320 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7321 	 */
   7322 
   7323 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7324 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7325 	    WTX_TCPIP_IPCSE(ipcse);
   7326 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7327 		WM_Q_EVCNT_INCR(txq, ipsum);
   7328 		fields |= WTX_IXSM;
   7329 	}
   7330 
   7331 	offset += iphl;
   7332 
   7333 	if (m0->m_pkthdr.csum_flags &
   7334 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7335 		WM_Q_EVCNT_INCR(txq, tusum);
   7336 		fields |= WTX_TXSM;
   7337 		tucs = WTX_TCPIP_TUCSS(offset) |
   7338 		    WTX_TCPIP_TUCSO(offset +
   7339 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7340 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7341 	} else if ((m0->m_pkthdr.csum_flags &
   7342 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7343 		WM_Q_EVCNT_INCR(txq, tusum6);
   7344 		fields |= WTX_TXSM;
   7345 		tucs = WTX_TCPIP_TUCSS(offset) |
   7346 		    WTX_TCPIP_TUCSO(offset +
   7347 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7348 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7349 	} else {
   7350 		/* Just initialize it to a valid TCP context. */
   7351 		tucs = WTX_TCPIP_TUCSS(offset) |
   7352 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7353 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7354 	}
   7355 
   7356 	/*
   7357 	 * We don't have to write context descriptor for every packet
   7358 	 * except for 82574. For 82574, we must write context descriptor
   7359 	 * for every packet when we use two descriptor queues.
   7360 	 * It would be overhead to write context descriptor for every packet,
   7361 	 * however it does not cause problems.
   7362 	 */
   7363 	/* Fill in the context descriptor. */
   7364 	t = (struct livengood_tcpip_ctxdesc *)
   7365 	    &txq->txq_descs[txq->txq_next];
   7366 	t->tcpip_ipcs = htole32(ipcs);
   7367 	t->tcpip_tucs = htole32(tucs);
   7368 	t->tcpip_cmdlen = htole32(cmdlen);
   7369 	t->tcpip_seg = htole32(seg);
   7370 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7371 
   7372 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7373 	txs->txs_ndesc++;
   7374 
   7375 	*cmdp = cmd;
   7376 	*fieldsp = fields;
   7377 
   7378 	return 0;
   7379 }
   7380 
   7381 static inline int
   7382 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7383 {
   7384 	struct wm_softc *sc = ifp->if_softc;
   7385 	u_int cpuid = cpu_index(curcpu());
   7386 
   7387 	/*
7388 	 * Currently, a simple distribution strategy.
7389 	 * TODO:
7390 	 * Distribute by flowid (RSS hash value).
   7391 	 */
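	/*
	 * For example, with ncpu = 8, sc_affinity_offset = 2 and
	 * sc_nqueues = 4, a thread running on CPU 5 maps to queue
	 * ((5 + 8 - 2) % 8) % 4 = 3.
	 */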
   7392 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7393 }
   7394 
   7395 /*
   7396  * wm_start:		[ifnet interface function]
   7397  *
   7398  *	Start packet transmission on the interface.
   7399  */
   7400 static void
   7401 wm_start(struct ifnet *ifp)
   7402 {
   7403 	struct wm_softc *sc = ifp->if_softc;
   7404 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7405 
   7406 #ifdef WM_MPSAFE
   7407 	KASSERT(if_is_mpsafe(ifp));
   7408 #endif
   7409 	/*
   7410 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7411 	 */
   7412 
   7413 	mutex_enter(txq->txq_lock);
   7414 	if (!txq->txq_stopping)
   7415 		wm_start_locked(ifp);
   7416 	mutex_exit(txq->txq_lock);
   7417 }
   7418 
   7419 static void
   7420 wm_start_locked(struct ifnet *ifp)
   7421 {
   7422 	struct wm_softc *sc = ifp->if_softc;
   7423 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7424 
   7425 	wm_send_common_locked(ifp, txq, false);
   7426 }
   7427 
   7428 static int
   7429 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7430 {
   7431 	int qid;
   7432 	struct wm_softc *sc = ifp->if_softc;
   7433 	struct wm_txqueue *txq;
   7434 
   7435 	qid = wm_select_txqueue(ifp, m);
   7436 	txq = &sc->sc_queue[qid].wmq_txq;
   7437 
   7438 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7439 		m_freem(m);
   7440 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7441 		return ENOBUFS;
   7442 	}
   7443 
   7444 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7445 	ifp->if_obytes += m->m_pkthdr.len;
   7446 	if (m->m_flags & M_MCAST)
   7447 		ifp->if_omcasts++;
   7448 
   7449 	if (mutex_tryenter(txq->txq_lock)) {
   7450 		if (!txq->txq_stopping)
   7451 			wm_transmit_locked(ifp, txq);
   7452 		mutex_exit(txq->txq_lock);
   7453 	}
   7454 
   7455 	return 0;
   7456 }
   7457 
   7458 static void
   7459 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7460 {
   7461 
   7462 	wm_send_common_locked(ifp, txq, true);
   7463 }
   7464 
   7465 static void
   7466 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7467     bool is_transmit)
   7468 {
   7469 	struct wm_softc *sc = ifp->if_softc;
   7470 	struct mbuf *m0;
   7471 	struct wm_txsoft *txs;
   7472 	bus_dmamap_t dmamap;
   7473 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7474 	bus_addr_t curaddr;
   7475 	bus_size_t seglen, curlen;
   7476 	uint32_t cksumcmd;
   7477 	uint8_t cksumfields;
   7478 	bool remap = true;
   7479 
   7480 	KASSERT(mutex_owned(txq->txq_lock));
   7481 
   7482 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7483 		return;
   7484 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7485 		return;
   7486 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7487 		return;
   7488 
   7489 	/* Remember the previous number of free descriptors. */
   7490 	ofree = txq->txq_free;
   7491 
   7492 	/*
   7493 	 * Loop through the send queue, setting up transmit descriptors
   7494 	 * until we drain the queue, or use up all available transmit
   7495 	 * descriptors.
   7496 	 */
   7497 	for (;;) {
   7498 		m0 = NULL;
   7499 
   7500 		/* Get a work queue entry. */
   7501 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7502 			wm_txeof(txq, UINT_MAX);
   7503 			if (txq->txq_sfree == 0) {
   7504 				DPRINTF(WM_DEBUG_TX,
   7505 				    ("%s: TX: no free job descriptors\n",
   7506 					device_xname(sc->sc_dev)));
   7507 				WM_Q_EVCNT_INCR(txq, txsstall);
   7508 				break;
   7509 			}
   7510 		}
   7511 
   7512 		/* Grab a packet off the queue. */
   7513 		if (is_transmit)
   7514 			m0 = pcq_get(txq->txq_interq);
   7515 		else
   7516 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7517 		if (m0 == NULL)
   7518 			break;
   7519 
   7520 		DPRINTF(WM_DEBUG_TX,
   7521 		    ("%s: TX: have packet to transmit: %p\n",
   7522 			device_xname(sc->sc_dev), m0));
   7523 
   7524 		txs = &txq->txq_soft[txq->txq_snext];
   7525 		dmamap = txs->txs_dmamap;
   7526 
   7527 		use_tso = (m0->m_pkthdr.csum_flags &
   7528 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7529 
   7530 		/*
   7531 		 * So says the Linux driver:
   7532 		 * The controller does a simple calculation to make sure
   7533 		 * there is enough room in the FIFO before initiating the
   7534 		 * DMA for each buffer. The calc is:
   7535 		 *	4 = ceil(buffer len / MSS)
   7536 		 * To make sure we don't overrun the FIFO, adjust the max
   7537 		 * buffer len if the MSS drops.
   7538 		 */
   7539 		dmamap->dm_maxsegsz =
   7540 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7541 		    ? m0->m_pkthdr.segsz << 2
   7542 		    : WTX_MAX_LEN;
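		/*
		 * For example, with a (hypothetical) MSS of 576 the cap is
		 * 576 << 2 = 2304 bytes per DMA segment, provided that is
		 * still below WTX_MAX_LEN.
		 */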
   7543 
   7544 		/*
   7545 		 * Load the DMA map.  If this fails, the packet either
   7546 		 * didn't fit in the allotted number of segments, or we
   7547 		 * were short on resources.  For the too-many-segments
   7548 		 * case, we simply report an error and drop the packet,
   7549 		 * since we can't sanely copy a jumbo packet to a single
   7550 		 * buffer.
   7551 		 */
   7552 retry:
   7553 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7554 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7555 		if (__predict_false(error)) {
   7556 			if (error == EFBIG) {
   7557 				if (remap == true) {
   7558 					struct mbuf *m;
   7559 
   7560 					remap = false;
   7561 					m = m_defrag(m0, M_NOWAIT);
   7562 					if (m != NULL) {
   7563 						WM_Q_EVCNT_INCR(txq, defrag);
   7564 						m0 = m;
   7565 						goto retry;
   7566 					}
   7567 				}
   7568 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7569 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7570 				    "DMA segments, dropping...\n",
   7571 				    device_xname(sc->sc_dev));
   7572 				wm_dump_mbuf_chain(sc, m0);
   7573 				m_freem(m0);
   7574 				continue;
   7575 			}
   7576 			/* Short on resources, just stop for now. */
   7577 			DPRINTF(WM_DEBUG_TX,
   7578 			    ("%s: TX: dmamap load failed: %d\n",
   7579 				device_xname(sc->sc_dev), error));
   7580 			break;
   7581 		}
   7582 
   7583 		segs_needed = dmamap->dm_nsegs;
   7584 		if (use_tso) {
   7585 			/* For sentinel descriptor; see below. */
   7586 			segs_needed++;
   7587 		}
   7588 
   7589 		/*
   7590 		 * Ensure we have enough descriptors free to describe
   7591 		 * the packet. Note, we always reserve one descriptor
   7592 		 * at the end of the ring due to the semantics of the
   7593 		 * TDT register, plus one more in the event we need
   7594 		 * to load offload context.
   7595 		 */
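		/*
		 * For example, if txq_free is 5, a packet needing 4
		 * descriptors is deferred (4 > 5 - 2): one slot stays free
		 * for the TDT semantics and one for a possible offload
		 * context descriptor.
		 */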
   7596 		if (segs_needed > txq->txq_free - 2) {
   7597 			/*
   7598 			 * Not enough free descriptors to transmit this
   7599 			 * packet.  We haven't committed anything yet,
   7600 			 * so just unload the DMA map, put the packet
   7601 			 * pack on the queue, and punt. Notify the upper
   7602 			 * layer that there are no more slots left.
   7603 			 */
   7604 			DPRINTF(WM_DEBUG_TX,
   7605 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7606 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7607 				segs_needed, txq->txq_free - 1));
   7608 			if (!is_transmit)
   7609 				ifp->if_flags |= IFF_OACTIVE;
   7610 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7611 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7612 			WM_Q_EVCNT_INCR(txq, txdstall);
   7613 			break;
   7614 		}
   7615 
   7616 		/*
   7617 		 * Check for 82547 Tx FIFO bug. We need to do this
   7618 		 * once we know we can transmit the packet, since we
   7619 		 * do some internal FIFO space accounting here.
   7620 		 */
   7621 		if (sc->sc_type == WM_T_82547 &&
   7622 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7623 			DPRINTF(WM_DEBUG_TX,
   7624 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7625 				device_xname(sc->sc_dev)));
   7626 			if (!is_transmit)
   7627 				ifp->if_flags |= IFF_OACTIVE;
   7628 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7629 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7630 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7631 			break;
   7632 		}
   7633 
   7634 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7635 
   7636 		DPRINTF(WM_DEBUG_TX,
   7637 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7638 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7639 
   7640 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7641 
   7642 		/*
   7643 		 * Store a pointer to the packet so that we can free it
   7644 		 * later.
   7645 		 *
   7646 		 * Initially, we consider the number of descriptors the
7647 		 * packet uses to be the number of DMA segments.  This may
   7648 		 * incremented by 1 if we do checksum offload (a descriptor
   7649 		 * is used to set the checksum context).
   7650 		 */
   7651 		txs->txs_mbuf = m0;
   7652 		txs->txs_firstdesc = txq->txq_next;
   7653 		txs->txs_ndesc = segs_needed;
   7654 
   7655 		/* Set up offload parameters for this packet. */
   7656 		if (m0->m_pkthdr.csum_flags &
   7657 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7658 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7659 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7660 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7661 					  &cksumfields) != 0) {
   7662 				/* Error message already displayed. */
   7663 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7664 				continue;
   7665 			}
   7666 		} else {
   7667 			cksumcmd = 0;
   7668 			cksumfields = 0;
   7669 		}
   7670 
   7671 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7672 
   7673 		/* Sync the DMA map. */
   7674 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7675 		    BUS_DMASYNC_PREWRITE);
   7676 
   7677 		/* Initialize the transmit descriptor. */
   7678 		for (nexttx = txq->txq_next, seg = 0;
   7679 		     seg < dmamap->dm_nsegs; seg++) {
   7680 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7681 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7682 			     seglen != 0;
   7683 			     curaddr += curlen, seglen -= curlen,
   7684 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7685 				curlen = seglen;
   7686 
   7687 				/*
   7688 				 * So says the Linux driver:
   7689 				 * Work around for premature descriptor
   7690 				 * write-backs in TSO mode.  Append a
   7691 				 * 4-byte sentinel descriptor.
   7692 				 */
   7693 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7694 				    curlen > 8)
   7695 					curlen -= 4;
   7696 
   7697 				wm_set_dma_addr(
   7698 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7699 				txq->txq_descs[nexttx].wtx_cmdlen
   7700 				    = htole32(cksumcmd | curlen);
   7701 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7702 				    = 0;
   7703 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7704 				    = cksumfields;
7705 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7706 				lasttx = nexttx;
   7707 
   7708 				DPRINTF(WM_DEBUG_TX,
   7709 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7710 					"len %#04zx\n",
   7711 					device_xname(sc->sc_dev), nexttx,
   7712 					(uint64_t)curaddr, curlen));
   7713 			}
   7714 		}
   7715 
   7716 		KASSERT(lasttx != -1);
   7717 
   7718 		/*
   7719 		 * Set up the command byte on the last descriptor of
   7720 		 * the packet. If we're in the interrupt delay window,
   7721 		 * delay the interrupt.
   7722 		 */
   7723 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7724 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7725 
   7726 		/*
   7727 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7728 		 * up the descriptor to encapsulate the packet for us.
   7729 		 *
   7730 		 * This is only valid on the last descriptor of the packet.
   7731 		 */
   7732 		if (vlan_has_tag(m0)) {
   7733 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7734 			    htole32(WTX_CMD_VLE);
   7735 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7736 			    = htole16(vlan_get_tag(m0));
   7737 		}
   7738 
   7739 		txs->txs_lastdesc = lasttx;
   7740 
   7741 		DPRINTF(WM_DEBUG_TX,
   7742 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7743 			device_xname(sc->sc_dev),
   7744 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7745 
   7746 		/* Sync the descriptors we're using. */
   7747 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7748 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7749 
   7750 		/* Give the packet to the chip. */
   7751 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7752 
   7753 		DPRINTF(WM_DEBUG_TX,
   7754 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7755 
   7756 		DPRINTF(WM_DEBUG_TX,
   7757 		    ("%s: TX: finished transmitting packet, job %d\n",
   7758 			device_xname(sc->sc_dev), txq->txq_snext));
   7759 
   7760 		/* Advance the tx pointer. */
   7761 		txq->txq_free -= txs->txs_ndesc;
   7762 		txq->txq_next = nexttx;
   7763 
   7764 		txq->txq_sfree--;
   7765 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7766 
   7767 		/* Pass the packet to any BPF listeners. */
   7768 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7769 	}
   7770 
   7771 	if (m0 != NULL) {
   7772 		if (!is_transmit)
   7773 			ifp->if_flags |= IFF_OACTIVE;
   7774 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7775 		WM_Q_EVCNT_INCR(txq, descdrop);
   7776 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7777 			__func__));
   7778 		m_freem(m0);
   7779 	}
   7780 
   7781 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7782 		/* No more slots; notify upper layer. */
   7783 		if (!is_transmit)
   7784 			ifp->if_flags |= IFF_OACTIVE;
   7785 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7786 	}
   7787 
   7788 	if (txq->txq_free != ofree) {
   7789 		/* Set a watchdog timer in case the chip flakes out. */
   7790 		txq->txq_lastsent = time_uptime;
   7791 		txq->txq_sending = true;
   7792 	}
   7793 }
   7794 
   7795 /*
   7796  * wm_nq_tx_offload:
   7797  *
   7798  *	Set up TCP/IP checksumming parameters for the
   7799  *	specified packet, for NEWQUEUE devices
   7800  */
   7801 static int
   7802 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7803     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7804 {
   7805 	struct mbuf *m0 = txs->txs_mbuf;
   7806 	uint32_t vl_len, mssidx, cmdc;
   7807 	struct ether_header *eh;
   7808 	int offset, iphl;
   7809 
   7810 	/*
   7811 	 * XXX It would be nice if the mbuf pkthdr had offset
   7812 	 * fields for the protocol headers.
   7813 	 */
   7814 	*cmdlenp = 0;
   7815 	*fieldsp = 0;
   7816 
   7817 	eh = mtod(m0, struct ether_header *);
   7818 	switch (htons(eh->ether_type)) {
   7819 	case ETHERTYPE_IP:
   7820 	case ETHERTYPE_IPV6:
   7821 		offset = ETHER_HDR_LEN;
   7822 		break;
   7823 
   7824 	case ETHERTYPE_VLAN:
   7825 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7826 		break;
   7827 
   7828 	default:
   7829 		/* Don't support this protocol or encapsulation. */
   7830 		*do_csum = false;
   7831 		return 0;
   7832 	}
   7833 	*do_csum = true;
   7834 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7835 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7836 
   7837 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7838 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7839 
   7840 	if ((m0->m_pkthdr.csum_flags &
   7841 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7842 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7843 	} else {
   7844 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7845 	}
   7846 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7847 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7848 
   7849 	if (vlan_has_tag(m0)) {
   7850 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7851 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7852 		*cmdlenp |= NQTX_CMD_VLE;
   7853 	}
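         	/*
         	 * At this point vl_len holds the context descriptor's packed
         	 * VLAN/MACLEN/IPLEN field: with the shift macros used above it
         	 * is laid out roughly as VLAN | MACLEN | IPLEN from the high
         	 * bits down (the exact bit positions are in if_wmreg.h).
         	 */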
   7854 
   7855 	mssidx = 0;
   7856 
   7857 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7858 		int hlen = offset + iphl;
   7859 		int tcp_hlen;
   7860 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7861 
   7862 		if (__predict_false(m0->m_len <
   7863 				    (hlen + sizeof(struct tcphdr)))) {
   7864 			/*
   7865 			 * TCP/IP headers are not in the first mbuf; we need
   7866 			 * to do this the slow and painful way. Let's just
   7867 			 * hope this doesn't happen very often.
   7868 			 */
   7869 			struct tcphdr th;
   7870 
   7871 			WM_Q_EVCNT_INCR(txq, tsopain);
   7872 
   7873 			m_copydata(m0, hlen, sizeof(th), &th);
   7874 			if (v4) {
   7875 				struct ip ip;
   7876 
   7877 				m_copydata(m0, offset, sizeof(ip), &ip);
   7878 				ip.ip_len = 0;
   7879 				m_copyback(m0,
   7880 				    offset + offsetof(struct ip, ip_len),
   7881 				    sizeof(ip.ip_len), &ip.ip_len);
   7882 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7883 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7884 			} else {
   7885 				struct ip6_hdr ip6;
   7886 
   7887 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7888 				ip6.ip6_plen = 0;
   7889 				m_copyback(m0,
   7890 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7891 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7892 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7893 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7894 			}
   7895 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7896 			    sizeof(th.th_sum), &th.th_sum);
   7897 
   7898 			tcp_hlen = th.th_off << 2;
   7899 		} else {
   7900 			/*
   7901 			 * TCP/IP headers are in the first mbuf; we can do
   7902 			 * this the easy way.
   7903 			 */
   7904 			struct tcphdr *th;
   7905 
   7906 			if (v4) {
   7907 				struct ip *ip =
   7908 				    (void *)(mtod(m0, char *) + offset);
   7909 				th = (void *)(mtod(m0, char *) + hlen);
   7910 
   7911 				ip->ip_len = 0;
   7912 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7913 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7914 			} else {
   7915 				struct ip6_hdr *ip6 =
   7916 				    (void *)(mtod(m0, char *) + offset);
   7917 				th = (void *)(mtod(m0, char *) + hlen);
   7918 
   7919 				ip6->ip6_plen = 0;
   7920 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7921 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7922 			}
   7923 			tcp_hlen = th->th_off << 2;
   7924 		}
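         		/*
         		 * Both branches above prepare the packet for TSO in the
         		 * same way: the IP total length (ip_len/ip6_plen) is
         		 * zeroed and th_sum is pre-seeded with the pseudo-header
         		 * checksum computed without the length field, since the
         		 * controller fills in both for each generated segment.
         		 */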
   7925 		hlen += tcp_hlen;
   7926 		*cmdlenp |= NQTX_CMD_TSE;
   7927 
   7928 		if (v4) {
   7929 			WM_Q_EVCNT_INCR(txq, tso);
   7930 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7931 		} else {
   7932 			WM_Q_EVCNT_INCR(txq, tso6);
   7933 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7934 		}
   7935 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7936 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7937 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7938 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7939 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7940 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7941 	} else {
   7942 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7943 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7944 	}
   7945 
   7946 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7947 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7948 		cmdc |= NQTXC_CMD_IP4;
   7949 	}
   7950 
   7951 	if (m0->m_pkthdr.csum_flags &
   7952 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7953 		WM_Q_EVCNT_INCR(txq, tusum);
   7954 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7955 			cmdc |= NQTXC_CMD_TCP;
   7956 		else
   7957 			cmdc |= NQTXC_CMD_UDP;
   7958 
   7959 		cmdc |= NQTXC_CMD_IP4;
   7960 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7961 	}
   7962 	if (m0->m_pkthdr.csum_flags &
   7963 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7964 		WM_Q_EVCNT_INCR(txq, tusum6);
   7965 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7966 			cmdc |= NQTXC_CMD_TCP;
   7967 		else
   7968 			cmdc |= NQTXC_CMD_UDP;
   7969 
   7970 		cmdc |= NQTXC_CMD_IP6;
   7971 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7972 	}
   7973 
    7974 	/*
    7975 	 * We don't have to write a context descriptor for every packet on
    7976 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    7977 	 * I210 and I211; writing one per Tx queue is enough for these
    7978 	 * controllers.
    7979 	 * Writing a context descriptor for every packet adds overhead,
    7980 	 * but it does not cause problems.
    7981 	 */
   7982 	/* Fill in the context descriptor. */
   7983 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7984 	    htole32(vl_len);
   7985 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7986 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7987 	    htole32(cmdc);
   7988 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7989 	    htole32(mssidx);
   7990 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7991 	DPRINTF(WM_DEBUG_TX,
   7992 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7993 		txq->txq_next, 0, vl_len));
   7994 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7995 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7996 	txs->txs_ndesc++;
   7997 	return 0;
   7998 }
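         /*
          * Illustrative example (hypothetical values, not from a datasheet): for
          * a TCPv4 packet with no VLAN tag and a 20-byte IP header,
          * wm_nq_tx_offload() builds
          *	vl_len = (ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT) | 20
          *	cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_IP4 | NQTXC_CMD_TCP
          * and writes them into one context descriptor ahead of the data
          * descriptors.
          */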
   7999 
   8000 /*
   8001  * wm_nq_start:		[ifnet interface function]
   8002  *
   8003  *	Start packet transmission on the interface for NEWQUEUE devices
   8004  */
   8005 static void
   8006 wm_nq_start(struct ifnet *ifp)
   8007 {
   8008 	struct wm_softc *sc = ifp->if_softc;
   8009 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8010 
   8011 #ifdef WM_MPSAFE
   8012 	KASSERT(if_is_mpsafe(ifp));
   8013 #endif
   8014 	/*
   8015 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8016 	 */
   8017 
   8018 	mutex_enter(txq->txq_lock);
   8019 	if (!txq->txq_stopping)
   8020 		wm_nq_start_locked(ifp);
   8021 	mutex_exit(txq->txq_lock);
   8022 }
   8023 
   8024 static void
   8025 wm_nq_start_locked(struct ifnet *ifp)
   8026 {
   8027 	struct wm_softc *sc = ifp->if_softc;
   8028 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8029 
   8030 	wm_nq_send_common_locked(ifp, txq, false);
   8031 }
   8032 
   8033 static int
   8034 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8035 {
   8036 	int qid;
   8037 	struct wm_softc *sc = ifp->if_softc;
   8038 	struct wm_txqueue *txq;
   8039 
   8040 	qid = wm_select_txqueue(ifp, m);
   8041 	txq = &sc->sc_queue[qid].wmq_txq;
   8042 
   8043 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8044 		m_freem(m);
   8045 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8046 		return ENOBUFS;
   8047 	}
   8048 
   8049 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8050 	ifp->if_obytes += m->m_pkthdr.len;
   8051 	if (m->m_flags & M_MCAST)
   8052 		ifp->if_omcasts++;
   8053 
    8054 	/*
    8055 	 * There are two situations in which this mutex_tryenter() can fail
    8056 	 * at run time:
    8057 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8058 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8059 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8060 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8061 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8062 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
    8063 	 * either.
    8064 	 */
   8065 	if (mutex_tryenter(txq->txq_lock)) {
   8066 		if (!txq->txq_stopping)
   8067 			wm_nq_transmit_locked(ifp, txq);
   8068 		mutex_exit(txq->txq_lock);
   8069 	}
   8070 
   8071 	return 0;
   8072 }
   8073 
   8074 static void
   8075 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8076 {
   8077 
   8078 	wm_nq_send_common_locked(ifp, txq, true);
   8079 }
   8080 
   8081 static void
   8082 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8083     bool is_transmit)
   8084 {
   8085 	struct wm_softc *sc = ifp->if_softc;
   8086 	struct mbuf *m0;
   8087 	struct wm_txsoft *txs;
   8088 	bus_dmamap_t dmamap;
   8089 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8090 	bool do_csum, sent;
   8091 	bool remap = true;
   8092 
   8093 	KASSERT(mutex_owned(txq->txq_lock));
   8094 
   8095 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8096 		return;
   8097 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8098 		return;
   8099 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8100 		return;
   8101 
   8102 	sent = false;
   8103 
   8104 	/*
   8105 	 * Loop through the send queue, setting up transmit descriptors
   8106 	 * until we drain the queue, or use up all available transmit
   8107 	 * descriptors.
   8108 	 */
   8109 	for (;;) {
   8110 		m0 = NULL;
   8111 
   8112 		/* Get a work queue entry. */
   8113 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8114 			wm_txeof(txq, UINT_MAX);
   8115 			if (txq->txq_sfree == 0) {
   8116 				DPRINTF(WM_DEBUG_TX,
   8117 				    ("%s: TX: no free job descriptors\n",
   8118 					device_xname(sc->sc_dev)));
   8119 				WM_Q_EVCNT_INCR(txq, txsstall);
   8120 				break;
   8121 			}
   8122 		}
   8123 
   8124 		/* Grab a packet off the queue. */
   8125 		if (is_transmit)
   8126 			m0 = pcq_get(txq->txq_interq);
   8127 		else
   8128 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8129 		if (m0 == NULL)
   8130 			break;
   8131 
   8132 		DPRINTF(WM_DEBUG_TX,
   8133 		    ("%s: TX: have packet to transmit: %p\n",
   8134 		    device_xname(sc->sc_dev), m0));
   8135 
   8136 		txs = &txq->txq_soft[txq->txq_snext];
   8137 		dmamap = txs->txs_dmamap;
   8138 
   8139 		/*
   8140 		 * Load the DMA map.  If this fails, the packet either
   8141 		 * didn't fit in the allotted number of segments, or we
   8142 		 * were short on resources.  For the too-many-segments
   8143 		 * case, we simply report an error and drop the packet,
   8144 		 * since we can't sanely copy a jumbo packet to a single
   8145 		 * buffer.
   8146 		 */
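         		/*
         		 * If the load fails with EFBIG, the chain is coalesced
         		 * with m_defrag() and retried once (see below) before
         		 * the packet is dropped as consuming too many segments.
         		 */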
   8147 retry:
   8148 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8149 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8150 		if (__predict_false(error)) {
   8151 			if (error == EFBIG) {
   8152 				if (remap == true) {
   8153 					struct mbuf *m;
   8154 
   8155 					remap = false;
   8156 					m = m_defrag(m0, M_NOWAIT);
   8157 					if (m != NULL) {
   8158 						WM_Q_EVCNT_INCR(txq, defrag);
   8159 						m0 = m;
   8160 						goto retry;
   8161 					}
   8162 				}
   8163 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8164 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8165 				    "DMA segments, dropping...\n",
   8166 				    device_xname(sc->sc_dev));
   8167 				wm_dump_mbuf_chain(sc, m0);
   8168 				m_freem(m0);
   8169 				continue;
   8170 			}
   8171 			/* Short on resources, just stop for now. */
   8172 			DPRINTF(WM_DEBUG_TX,
   8173 			    ("%s: TX: dmamap load failed: %d\n",
   8174 				device_xname(sc->sc_dev), error));
   8175 			break;
   8176 		}
   8177 
   8178 		segs_needed = dmamap->dm_nsegs;
   8179 
   8180 		/*
   8181 		 * Ensure we have enough descriptors free to describe
   8182 		 * the packet. Note, we always reserve one descriptor
   8183 		 * at the end of the ring due to the semantics of the
   8184 		 * TDT register, plus one more in the event we need
   8185 		 * to load offload context.
   8186 		 */
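         		/*
         		 * E.g. with txq_free == 10, a packet may use at most 8
         		 * data segments here; hence the "- 2" below.
         		 */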
   8187 		if (segs_needed > txq->txq_free - 2) {
    8188 			/*
    8189 			 * Not enough free descriptors to transmit this
    8190 			 * packet.  We haven't committed anything yet,
    8191 			 * so just unload the DMA map and punt; the
    8192 			 * packet is freed below. Notify the upper
    8193 			 * layer that there are no more slots left.
    8194 			 */
   8195 			DPRINTF(WM_DEBUG_TX,
   8196 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8197 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8198 				segs_needed, txq->txq_free - 1));
   8199 			if (!is_transmit)
   8200 				ifp->if_flags |= IFF_OACTIVE;
   8201 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8202 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8203 			WM_Q_EVCNT_INCR(txq, txdstall);
   8204 			break;
   8205 		}
   8206 
   8207 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8208 
   8209 		DPRINTF(WM_DEBUG_TX,
   8210 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8211 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8212 
   8213 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8214 
    8215 		/*
    8216 		 * Store a pointer to the packet so that we can free it
    8217 		 * later.
    8218 		 *
    8219 		 * Initially, we consider the number of descriptors the
    8220 		 * packet uses to be the number of DMA segments.  This may
    8221 		 * be incremented by 1 if we do checksum offload (a
    8222 		 * descriptor is used to set the checksum context).
    8223 		 */
   8224 		txs->txs_mbuf = m0;
   8225 		txs->txs_firstdesc = txq->txq_next;
   8226 		txs->txs_ndesc = segs_needed;
   8227 
   8228 		/* Set up offload parameters for this packet. */
   8229 		uint32_t cmdlen, fields, dcmdlen;
   8230 		if (m0->m_pkthdr.csum_flags &
   8231 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8232 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8233 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8234 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8235 			    &do_csum) != 0) {
   8236 				/* Error message already displayed. */
   8237 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8238 				continue;
   8239 			}
   8240 		} else {
   8241 			do_csum = false;
   8242 			cmdlen = 0;
   8243 			fields = 0;
   8244 		}
   8245 
   8246 		/* Sync the DMA map. */
   8247 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8248 		    BUS_DMASYNC_PREWRITE);
   8249 
   8250 		/* Initialize the first transmit descriptor. */
   8251 		nexttx = txq->txq_next;
   8252 		if (!do_csum) {
   8253 			/* Setup a legacy descriptor */
   8254 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8255 			    dmamap->dm_segs[0].ds_addr);
   8256 			txq->txq_descs[nexttx].wtx_cmdlen =
   8257 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8258 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8259 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8260 			if (vlan_has_tag(m0)) {
   8261 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8262 				    htole32(WTX_CMD_VLE);
   8263 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8264 				    htole16(vlan_get_tag(m0));
   8265 			} else
    8266 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8267 
   8268 			dcmdlen = 0;
   8269 		} else {
   8270 			/* Setup an advanced data descriptor */
   8271 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8272 			    htole64(dmamap->dm_segs[0].ds_addr);
   8273 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8274 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8275 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8276 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8277 			    htole32(fields);
   8278 			DPRINTF(WM_DEBUG_TX,
   8279 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8280 				device_xname(sc->sc_dev), nexttx,
   8281 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8282 			DPRINTF(WM_DEBUG_TX,
   8283 			    ("\t 0x%08x%08x\n", fields,
   8284 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8285 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8286 		}
   8287 
   8288 		lasttx = nexttx;
   8289 		nexttx = WM_NEXTTX(txq, nexttx);
    8290 		/*
    8291 		 * Fill in the next descriptors. The legacy and advanced
    8292 		 * formats are the same here.
    8293 		 */
   8294 		for (seg = 1; seg < dmamap->dm_nsegs;
   8295 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8296 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8297 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8298 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8299 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8300 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8301 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8302 			lasttx = nexttx;
   8303 
   8304 			DPRINTF(WM_DEBUG_TX,
   8305 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8306 				device_xname(sc->sc_dev), nexttx,
   8307 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8308 				dmamap->dm_segs[seg].ds_len));
   8309 		}
   8310 
   8311 		KASSERT(lasttx != -1);
   8312 
   8313 		/*
   8314 		 * Set up the command byte on the last descriptor of
   8315 		 * the packet. If we're in the interrupt delay window,
   8316 		 * delay the interrupt.
   8317 		 */
   8318 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8319 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8320 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8321 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8322 
   8323 		txs->txs_lastdesc = lasttx;
   8324 
   8325 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8326 		    device_xname(sc->sc_dev),
   8327 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8328 
   8329 		/* Sync the descriptors we're using. */
   8330 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8331 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8332 
   8333 		/* Give the packet to the chip. */
   8334 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8335 		sent = true;
   8336 
   8337 		DPRINTF(WM_DEBUG_TX,
   8338 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8339 
   8340 		DPRINTF(WM_DEBUG_TX,
   8341 		    ("%s: TX: finished transmitting packet, job %d\n",
   8342 			device_xname(sc->sc_dev), txq->txq_snext));
   8343 
   8344 		/* Advance the tx pointer. */
   8345 		txq->txq_free -= txs->txs_ndesc;
   8346 		txq->txq_next = nexttx;
   8347 
   8348 		txq->txq_sfree--;
   8349 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8350 
   8351 		/* Pass the packet to any BPF listeners. */
   8352 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8353 	}
   8354 
   8355 	if (m0 != NULL) {
   8356 		if (!is_transmit)
   8357 			ifp->if_flags |= IFF_OACTIVE;
   8358 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8359 		WM_Q_EVCNT_INCR(txq, descdrop);
   8360 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8361 			__func__));
   8362 		m_freem(m0);
   8363 	}
   8364 
   8365 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8366 		/* No more slots; notify upper layer. */
   8367 		if (!is_transmit)
   8368 			ifp->if_flags |= IFF_OACTIVE;
   8369 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8370 	}
   8371 
   8372 	if (sent) {
   8373 		/* Set a watchdog timer in case the chip flakes out. */
   8374 		txq->txq_lastsent = time_uptime;
   8375 		txq->txq_sending = true;
   8376 	}
   8377 }
   8378 
   8379 static void
   8380 wm_deferred_start_locked(struct wm_txqueue *txq)
   8381 {
   8382 	struct wm_softc *sc = txq->txq_sc;
   8383 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8384 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8385 	int qid = wmq->wmq_id;
   8386 
   8387 	KASSERT(mutex_owned(txq->txq_lock));
   8388 
   8389 	if (txq->txq_stopping) {
   8390 		mutex_exit(txq->txq_lock);
   8391 		return;
   8392 	}
   8393 
   8394 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8395 		/* XXX needed for ALTQ or a single-CPU system */
   8396 		if (qid == 0)
   8397 			wm_nq_start_locked(ifp);
   8398 		wm_nq_transmit_locked(ifp, txq);
   8399 	} else {
    8400 		/* XXX needed for ALTQ or a single-CPU system */
   8401 		if (qid == 0)
   8402 			wm_start_locked(ifp);
   8403 		wm_transmit_locked(ifp, txq);
   8404 	}
   8405 }
   8406 
   8407 /* Interrupt */
   8408 
   8409 /*
   8410  * wm_txeof:
   8411  *
   8412  *	Helper; handle transmit interrupts.
   8413  */
   8414 static bool
   8415 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8416 {
   8417 	struct wm_softc *sc = txq->txq_sc;
   8418 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8419 	struct wm_txsoft *txs;
   8420 	int count = 0;
   8421 	int i;
   8422 	uint8_t status;
   8423 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8424 	bool more = false;
   8425 
   8426 	KASSERT(mutex_owned(txq->txq_lock));
   8427 
   8428 	if (txq->txq_stopping)
   8429 		return false;
   8430 
   8431 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8432 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8433 	if (wmq->wmq_id == 0)
   8434 		ifp->if_flags &= ~IFF_OACTIVE;
   8435 
   8436 	/*
   8437 	 * Go through the Tx list and free mbufs for those
   8438 	 * frames which have been transmitted.
   8439 	 */
   8440 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8441 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8442 		if (limit-- == 0) {
   8443 			more = true;
   8444 			DPRINTF(WM_DEBUG_TX,
   8445 			    ("%s: TX: loop limited, job %d is not processed\n",
   8446 				device_xname(sc->sc_dev), i));
   8447 			break;
   8448 		}
   8449 
   8450 		txs = &txq->txq_soft[i];
   8451 
   8452 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8453 			device_xname(sc->sc_dev), i));
   8454 
   8455 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8456 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8457 
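         		/*
         		 * WTX_ST_DD (descriptor done) is set by status
         		 * write-back when the chip has finished a packet;
         		 * write-back is requested by WTX_CMD_RS on the last
         		 * descriptor of each packet.
         		 */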
   8458 		status =
   8459 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8460 		if ((status & WTX_ST_DD) == 0) {
   8461 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8462 			    BUS_DMASYNC_PREREAD);
   8463 			break;
   8464 		}
   8465 
   8466 		count++;
   8467 		DPRINTF(WM_DEBUG_TX,
   8468 		    ("%s: TX: job %d done: descs %d..%d\n",
   8469 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8470 		    txs->txs_lastdesc));
   8471 
   8472 		/*
   8473 		 * XXX We should probably be using the statistics
   8474 		 * XXX registers, but I don't know if they exist
   8475 		 * XXX on chips before the i82544.
   8476 		 */
   8477 
   8478 #ifdef WM_EVENT_COUNTERS
   8479 		if (status & WTX_ST_TU)
   8480 			WM_Q_EVCNT_INCR(txq, underrun);
   8481 #endif /* WM_EVENT_COUNTERS */
   8482 
    8483 		/*
    8484 		 * Documents for the 82574 and newer say the status field has
    8485 		 * neither an EC (Excessive Collision) nor an LC (Late Collision)
    8486 		 * bit; both are reserved. Refer to the "PCIe GbE Controller Open
    8487 		 * Source Software Developer's Manual", the 82574 datasheet and newer.
    8488 		 *
    8489 		 * XXX I saw the LC bit set on I218 even though the media was full
    8490 		 * duplex, so the bit might have some other meaning (I have no
    8491 		 * documentation).
    8492 		 */
   8493 
   8494 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8495 		    && ((sc->sc_type < WM_T_82574)
   8496 			|| (sc->sc_type == WM_T_80003))) {
   8497 			ifp->if_oerrors++;
   8498 			if (status & WTX_ST_LC)
   8499 				log(LOG_WARNING, "%s: late collision\n",
   8500 				    device_xname(sc->sc_dev));
   8501 			else if (status & WTX_ST_EC) {
   8502 				ifp->if_collisions +=
   8503 				    TX_COLLISION_THRESHOLD + 1;
   8504 				log(LOG_WARNING, "%s: excessive collisions\n",
   8505 				    device_xname(sc->sc_dev));
   8506 			}
   8507 		} else
   8508 			ifp->if_opackets++;
   8509 
   8510 		txq->txq_packets++;
   8511 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8512 
   8513 		txq->txq_free += txs->txs_ndesc;
   8514 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8515 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8516 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8517 		m_freem(txs->txs_mbuf);
   8518 		txs->txs_mbuf = NULL;
   8519 	}
   8520 
   8521 	/* Update the dirty transmit buffer pointer. */
   8522 	txq->txq_sdirty = i;
   8523 	DPRINTF(WM_DEBUG_TX,
   8524 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8525 
   8526 	if (count != 0)
   8527 		rnd_add_uint32(&sc->rnd_source, count);
   8528 
   8529 	/*
   8530 	 * If there are no more pending transmissions, cancel the watchdog
   8531 	 * timer.
   8532 	 */
   8533 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8534 		txq->txq_sending = false;
   8535 
   8536 	return more;
   8537 }
   8538 
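         /*
          * Rx descriptor accessors.  The driver handles three Rx descriptor
          * layouts: legacy (wrx_*), 82574 extended (erx_*) and NEWQUEUE advanced
          * (nqrx_*).  The inline helpers below hide the differences.
          */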
   8539 static inline uint32_t
   8540 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8541 {
   8542 	struct wm_softc *sc = rxq->rxq_sc;
   8543 
   8544 	if (sc->sc_type == WM_T_82574)
   8545 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8546 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8547 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8548 	else
   8549 		return rxq->rxq_descs[idx].wrx_status;
   8550 }
   8551 
   8552 static inline uint32_t
   8553 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8554 {
   8555 	struct wm_softc *sc = rxq->rxq_sc;
   8556 
   8557 	if (sc->sc_type == WM_T_82574)
   8558 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8559 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8560 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8561 	else
   8562 		return rxq->rxq_descs[idx].wrx_errors;
   8563 }
   8564 
   8565 static inline uint16_t
   8566 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8567 {
   8568 	struct wm_softc *sc = rxq->rxq_sc;
   8569 
   8570 	if (sc->sc_type == WM_T_82574)
   8571 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8572 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8573 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8574 	else
   8575 		return rxq->rxq_descs[idx].wrx_special;
   8576 }
   8577 
   8578 static inline int
   8579 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8580 {
   8581 	struct wm_softc *sc = rxq->rxq_sc;
   8582 
   8583 	if (sc->sc_type == WM_T_82574)
   8584 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8585 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8586 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8587 	else
   8588 		return rxq->rxq_descs[idx].wrx_len;
   8589 }
   8590 
   8591 #ifdef WM_DEBUG
   8592 static inline uint32_t
   8593 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8594 {
   8595 	struct wm_softc *sc = rxq->rxq_sc;
   8596 
   8597 	if (sc->sc_type == WM_T_82574)
   8598 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8599 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8600 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8601 	else
   8602 		return 0;
   8603 }
   8604 
   8605 static inline uint8_t
   8606 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8607 {
   8608 	struct wm_softc *sc = rxq->rxq_sc;
   8609 
   8610 	if (sc->sc_type == WM_T_82574)
   8611 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8612 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8613 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8614 	else
   8615 		return 0;
   8616 }
   8617 #endif /* WM_DEBUG */
   8618 
   8619 static inline bool
   8620 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8621     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8622 {
   8623 
   8624 	if (sc->sc_type == WM_T_82574)
   8625 		return (status & ext_bit) != 0;
   8626 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8627 		return (status & nq_bit) != 0;
   8628 	else
   8629 		return (status & legacy_bit) != 0;
   8630 }
   8631 
   8632 static inline bool
   8633 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8634     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8635 {
   8636 
   8637 	if (sc->sc_type == WM_T_82574)
   8638 		return (error & ext_bit) != 0;
   8639 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8640 		return (error & nq_bit) != 0;
   8641 	else
   8642 		return (error & legacy_bit) != 0;
   8643 }
   8644 
   8645 static inline bool
   8646 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8647 {
   8648 
   8649 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8650 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8651 		return true;
   8652 	else
   8653 		return false;
   8654 }
   8655 
   8656 static inline bool
   8657 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8658 {
   8659 	struct wm_softc *sc = rxq->rxq_sc;
   8660 
   8661 	/* XXX missing error bit for newqueue? */
   8662 	if (wm_rxdesc_is_set_error(sc, errors,
   8663 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8664 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8665 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8666 		NQRXC_ERROR_RXE)) {
   8667 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8668 		    EXTRXC_ERROR_SE, 0))
   8669 			log(LOG_WARNING, "%s: symbol error\n",
   8670 			    device_xname(sc->sc_dev));
   8671 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8672 		    EXTRXC_ERROR_SEQ, 0))
   8673 			log(LOG_WARNING, "%s: receive sequence error\n",
   8674 			    device_xname(sc->sc_dev));
   8675 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8676 		    EXTRXC_ERROR_CE, 0))
   8677 			log(LOG_WARNING, "%s: CRC error\n",
   8678 			    device_xname(sc->sc_dev));
   8679 		return true;
   8680 	}
   8681 
   8682 	return false;
   8683 }
   8684 
   8685 static inline bool
   8686 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8687 {
   8688 	struct wm_softc *sc = rxq->rxq_sc;
   8689 
   8690 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8691 		NQRXC_STATUS_DD)) {
   8692 		/* We have processed all of the receive descriptors. */
   8693 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8694 		return false;
   8695 	}
   8696 
   8697 	return true;
   8698 }
   8699 
   8700 static inline bool
   8701 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8702     uint16_t vlantag, struct mbuf *m)
   8703 {
   8704 
   8705 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8706 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8707 		vlan_set_tag(m, le16toh(vlantag));
   8708 	}
   8709 
   8710 	return true;
   8711 }
   8712 
   8713 static inline void
   8714 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8715     uint32_t errors, struct mbuf *m)
   8716 {
   8717 	struct wm_softc *sc = rxq->rxq_sc;
   8718 
   8719 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8720 		if (wm_rxdesc_is_set_status(sc, status,
   8721 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8722 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8723 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8724 			if (wm_rxdesc_is_set_error(sc, errors,
   8725 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8726 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8727 		}
   8728 		if (wm_rxdesc_is_set_status(sc, status,
   8729 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8730 			/*
   8731 			 * Note: we don't know if this was TCP or UDP,
   8732 			 * so we just set both bits, and expect the
   8733 			 * upper layers to deal.
   8734 			 */
   8735 			WM_Q_EVCNT_INCR(rxq, tusum);
   8736 			m->m_pkthdr.csum_flags |=
   8737 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8738 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8739 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8740 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8741 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8742 		}
   8743 	}
   8744 }
   8745 
   8746 /*
   8747  * wm_rxeof:
   8748  *
   8749  *	Helper; handle receive interrupts.
   8750  */
   8751 static bool
   8752 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8753 {
   8754 	struct wm_softc *sc = rxq->rxq_sc;
   8755 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8756 	struct wm_rxsoft *rxs;
   8757 	struct mbuf *m;
   8758 	int i, len;
   8759 	int count = 0;
   8760 	uint32_t status, errors;
   8761 	uint16_t vlantag;
   8762 	bool more = false;
   8763 
   8764 	KASSERT(mutex_owned(rxq->rxq_lock));
   8765 
   8766 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8767 		if (limit-- == 0) {
   8768 			rxq->rxq_ptr = i;
   8769 			more = true;
   8770 			DPRINTF(WM_DEBUG_RX,
   8771 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8772 				device_xname(sc->sc_dev), i));
   8773 			break;
   8774 		}
   8775 
   8776 		rxs = &rxq->rxq_soft[i];
   8777 
   8778 		DPRINTF(WM_DEBUG_RX,
   8779 		    ("%s: RX: checking descriptor %d\n",
   8780 			device_xname(sc->sc_dev), i));
   8781 		wm_cdrxsync(rxq, i,
   8782 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8783 
   8784 		status = wm_rxdesc_get_status(rxq, i);
   8785 		errors = wm_rxdesc_get_errors(rxq, i);
   8786 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8787 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8788 #ifdef WM_DEBUG
   8789 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8790 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8791 #endif
   8792 
   8793 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8794 			/*
    8795 			 * Update the receive pointer while holding rxq_lock
    8796 			 * so that it stays consistent with the counters.
    8797 			 */
   8798 			rxq->rxq_ptr = i;
   8799 			break;
   8800 		}
   8801 
   8802 		count++;
   8803 		if (__predict_false(rxq->rxq_discard)) {
   8804 			DPRINTF(WM_DEBUG_RX,
   8805 			    ("%s: RX: discarding contents of descriptor %d\n",
   8806 				device_xname(sc->sc_dev), i));
   8807 			wm_init_rxdesc(rxq, i);
   8808 			if (wm_rxdesc_is_eop(rxq, status)) {
   8809 				/* Reset our state. */
   8810 				DPRINTF(WM_DEBUG_RX,
   8811 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8812 					device_xname(sc->sc_dev)));
   8813 				rxq->rxq_discard = 0;
   8814 			}
   8815 			continue;
   8816 		}
   8817 
   8818 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8819 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8820 
   8821 		m = rxs->rxs_mbuf;
   8822 
   8823 		/*
   8824 		 * Add a new receive buffer to the ring, unless of
   8825 		 * course the length is zero. Treat the latter as a
   8826 		 * failed mapping.
   8827 		 */
   8828 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8829 			/*
   8830 			 * Failed, throw away what we've done so
   8831 			 * far, and discard the rest of the packet.
   8832 			 */
   8833 			ifp->if_ierrors++;
   8834 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8835 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8836 			wm_init_rxdesc(rxq, i);
   8837 			if (!wm_rxdesc_is_eop(rxq, status))
   8838 				rxq->rxq_discard = 1;
   8839 			if (rxq->rxq_head != NULL)
   8840 				m_freem(rxq->rxq_head);
   8841 			WM_RXCHAIN_RESET(rxq);
   8842 			DPRINTF(WM_DEBUG_RX,
   8843 			    ("%s: RX: Rx buffer allocation failed, "
   8844 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8845 				rxq->rxq_discard ? " (discard)" : ""));
   8846 			continue;
   8847 		}
   8848 
   8849 		m->m_len = len;
   8850 		rxq->rxq_len += len;
   8851 		DPRINTF(WM_DEBUG_RX,
   8852 		    ("%s: RX: buffer at %p len %d\n",
   8853 			device_xname(sc->sc_dev), m->m_data, len));
   8854 
   8855 		/* If this is not the end of the packet, keep looking. */
   8856 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8857 			WM_RXCHAIN_LINK(rxq, m);
   8858 			DPRINTF(WM_DEBUG_RX,
   8859 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8860 				device_xname(sc->sc_dev), rxq->rxq_len));
   8861 			continue;
   8862 		}
   8863 
    8864 		/*
    8865 		 * Okay, we have the entire packet now. The chip is
    8866 		 * configured to include the FCS except on I350 and I21[01]
    8867 		 * (not all chips can be configured to strip it),
    8868 		 * so we need to trim it.
    8869 		 * We may need to adjust the length of the previous mbuf in
    8870 		 * the chain if the current mbuf is too short.
    8871 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8872 		 * is always set on I350, so we don't trim there.
    8873 		 */
   8874 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8875 		    && (sc->sc_type != WM_T_I210)
   8876 		    && (sc->sc_type != WM_T_I211)) {
   8877 			if (m->m_len < ETHER_CRC_LEN) {
   8878 				rxq->rxq_tail->m_len
   8879 				    -= (ETHER_CRC_LEN - m->m_len);
   8880 				m->m_len = 0;
   8881 			} else
   8882 				m->m_len -= ETHER_CRC_LEN;
   8883 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8884 		} else
   8885 			len = rxq->rxq_len;
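         		/*
         		 * In the trimming branch above: if, for example, the
         		 * last mbuf holds only 2 of the 4 FCS bytes, it is
         		 * emptied (m_len = 0) and the previous mbuf in the
         		 * chain is shortened by the remaining 2 bytes.
         		 */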
   8886 
   8887 		WM_RXCHAIN_LINK(rxq, m);
   8888 
   8889 		*rxq->rxq_tailp = NULL;
   8890 		m = rxq->rxq_head;
   8891 
   8892 		WM_RXCHAIN_RESET(rxq);
   8893 
   8894 		DPRINTF(WM_DEBUG_RX,
   8895 		    ("%s: RX: have entire packet, len -> %d\n",
   8896 			device_xname(sc->sc_dev), len));
   8897 
   8898 		/* If an error occurred, update stats and drop the packet. */
   8899 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8900 			m_freem(m);
   8901 			continue;
   8902 		}
   8903 
   8904 		/* No errors.  Receive the packet. */
   8905 		m_set_rcvif(m, ifp);
   8906 		m->m_pkthdr.len = len;
    8907 		/*
    8908 		 * TODO
    8909 		 * We should save rsshash and rsstype in this mbuf.
    8910 		 */
   8911 		DPRINTF(WM_DEBUG_RX,
   8912 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8913 			device_xname(sc->sc_dev), rsstype, rsshash));
   8914 
   8915 		/*
   8916 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8917 		 * for us.  Associate the tag with the packet.
   8918 		 */
   8919 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8920 			continue;
   8921 
   8922 		/* Set up checksum info for this packet. */
   8923 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8924 		/*
    8925 		 * Update the receive pointer while holding rxq_lock so that
    8926 		 * it stays consistent with the counters.
    8927 		 */
   8928 		rxq->rxq_ptr = i;
   8929 		rxq->rxq_packets++;
   8930 		rxq->rxq_bytes += len;
   8931 		mutex_exit(rxq->rxq_lock);
   8932 
   8933 		/* Pass it on. */
   8934 		if_percpuq_enqueue(sc->sc_ipq, m);
   8935 
   8936 		mutex_enter(rxq->rxq_lock);
   8937 
   8938 		if (rxq->rxq_stopping)
   8939 			break;
   8940 	}
   8941 
   8942 	if (count != 0)
   8943 		rnd_add_uint32(&sc->rnd_source, count);
   8944 
   8945 	DPRINTF(WM_DEBUG_RX,
   8946 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8947 
   8948 	return more;
   8949 }
   8950 
   8951 /*
   8952  * wm_linkintr_gmii:
   8953  *
   8954  *	Helper; handle link interrupts for GMII.
   8955  */
   8956 static void
   8957 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8958 {
   8959 	device_t dev = sc->sc_dev;
   8960 	uint32_t status, reg;
   8961 	bool link;
   8962 	int rv;
   8963 
   8964 	KASSERT(WM_CORE_LOCKED(sc));
   8965 
   8966 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8967 		__func__));
   8968 
   8969 	if ((icr & ICR_LSC) == 0) {
   8970 		if (icr & ICR_RXSEQ)
   8971 			DPRINTF(WM_DEBUG_LINK,
   8972 			    ("%s: LINK Receive sequence error\n",
   8973 				device_xname(dev)));
   8974 		return;
   8975 	}
   8976 
   8977 	/* Link status changed */
   8978 	status = CSR_READ(sc, WMREG_STATUS);
   8979 	link = status & STATUS_LU;
   8980 	if (link) {
   8981 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8982 			device_xname(dev),
   8983 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8984 	} else {
   8985 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8986 			device_xname(dev)));
   8987 	}
   8988 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8989 		wm_gig_downshift_workaround_ich8lan(sc);
   8990 
   8991 	if ((sc->sc_type == WM_T_ICH8)
   8992 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8993 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8994 	}
   8995 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8996 		device_xname(dev)));
   8997 	mii_pollstat(&sc->sc_mii);
   8998 	if (sc->sc_type == WM_T_82543) {
   8999 		int miistatus, active;
   9000 
    9001 		/*
    9002 		 * With the 82543, we need to force the MAC's
    9003 		 * speed and duplex to match the PHY's speed
    9004 		 * and duplex configuration.
    9005 		 */
   9006 		miistatus = sc->sc_mii.mii_media_status;
   9007 
   9008 		if (miistatus & IFM_ACTIVE) {
   9009 			active = sc->sc_mii.mii_media_active;
   9010 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9011 			switch (IFM_SUBTYPE(active)) {
   9012 			case IFM_10_T:
   9013 				sc->sc_ctrl |= CTRL_SPEED_10;
   9014 				break;
   9015 			case IFM_100_TX:
   9016 				sc->sc_ctrl |= CTRL_SPEED_100;
   9017 				break;
   9018 			case IFM_1000_T:
   9019 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9020 				break;
   9021 			default:
   9022 				/*
   9023 				 * Fiber?
    9024 				 * Should not enter here.
   9025 				 */
   9026 				printf("unknown media (%x)\n", active);
   9027 				break;
   9028 			}
   9029 			if (active & IFM_FDX)
   9030 				sc->sc_ctrl |= CTRL_FD;
   9031 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9032 		}
   9033 	} else if (sc->sc_type == WM_T_PCH) {
   9034 		wm_k1_gig_workaround_hv(sc,
   9035 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9036 	}
   9037 
   9038 	/*
   9039 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9040 	 * aggressive, resulting in many collisions. To avoid this, increase
   9041 	 * the IPG and reduce Rx latency in the PHY.
   9042 	 */
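         	/*
         	 * The IPGT values written below (0xff for 10Mbps half-duplex,
         	 * 0x08 as the default) trade a longer inter-packet gap for a
         	 * less aggressive transmitter.
         	 */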
   9043 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9044 	    && link) {
   9045 		uint32_t tipg_reg;
   9046 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9047 		bool fdx;
   9048 		uint16_t emi_addr, emi_val;
   9049 
   9050 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9051 		tipg_reg &= ~TIPG_IPGT_MASK;
   9052 		fdx = status & STATUS_FD;
   9053 
   9054 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9055 			tipg_reg |= 0xff;
   9056 			/* Reduce Rx latency in analog PHY */
   9057 			emi_val = 0;
   9058 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9059 		    fdx && speed != STATUS_SPEED_1000) {
   9060 			tipg_reg |= 0xc;
   9061 			emi_val = 1;
   9062 		} else {
    9063 			/* Roll back to the default values */
   9064 			tipg_reg |= 0x08;
   9065 			emi_val = 1;
   9066 		}
   9067 
   9068 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9069 
   9070 		rv = sc->phy.acquire(sc);
   9071 		if (rv)
   9072 			return;
   9073 
   9074 		if (sc->sc_type == WM_T_PCH2)
   9075 			emi_addr = I82579_RX_CONFIG;
   9076 		else
   9077 			emi_addr = I217_RX_CONFIG;
   9078 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9079 
   9080 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9081 			uint16_t phy_reg;
   9082 
   9083 			sc->phy.readreg_locked(dev, 2,
   9084 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9085 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9086 			if (speed == STATUS_SPEED_100
   9087 			    || speed == STATUS_SPEED_10)
   9088 				phy_reg |= 0x3e8;
   9089 			else
   9090 				phy_reg |= 0xfa;
   9091 			sc->phy.writereg_locked(dev, 2,
   9092 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9093 
   9094 			if (speed == STATUS_SPEED_1000) {
   9095 				sc->phy.readreg_locked(dev, 2,
   9096 				    HV_PM_CTRL, &phy_reg);
   9097 
   9098 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9099 
   9100 				sc->phy.writereg_locked(dev, 2,
   9101 				    HV_PM_CTRL, phy_reg);
   9102 			}
   9103 		}
   9104 		sc->phy.release(sc);
   9105 
   9106 		if (rv)
   9107 			return;
   9108 
   9109 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9110 			uint16_t data, ptr_gap;
   9111 
   9112 			if (speed == STATUS_SPEED_1000) {
   9113 				rv = sc->phy.acquire(sc);
   9114 				if (rv)
   9115 					return;
   9116 
   9117 				rv = sc->phy.readreg_locked(dev, 2,
   9118 				    I219_UNKNOWN1, &data);
   9119 				if (rv) {
   9120 					sc->phy.release(sc);
   9121 					return;
   9122 				}
   9123 
   9124 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9125 				if (ptr_gap < 0x18) {
   9126 					data &= ~(0x3ff << 2);
   9127 					data |= (0x18 << 2);
   9128 					rv = sc->phy.writereg_locked(dev,
   9129 					    2, I219_UNKNOWN1, data);
   9130 				}
   9131 				sc->phy.release(sc);
   9132 				if (rv)
   9133 					return;
   9134 			} else {
   9135 				rv = sc->phy.acquire(sc);
   9136 				if (rv)
   9137 					return;
   9138 
   9139 				rv = sc->phy.writereg_locked(dev, 2,
   9140 				    I219_UNKNOWN1, 0xc023);
   9141 				sc->phy.release(sc);
   9142 				if (rv)
   9143 					return;
   9144 
   9145 			}
   9146 		}
   9147 	}
   9148 
    9149 	/*
    9150 	 * I217 packet loss issue:
    9151 	 * ensure that the FEXTNVM4 Beacon Duration is set
    9152 	 * correctly on power up.
    9153 	 * Set the Beacon Duration for I217 to 8 usec.
    9154 	 */
   9155 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9156 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9157 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9158 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9159 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9160 	}
   9161 
   9162 	/* Work-around I218 hang issue */
   9163 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9164 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9165 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9166 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9167 		wm_k1_workaround_lpt_lp(sc, link);
   9168 
   9169 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9170 		/*
   9171 		 * Set platform power management values for Latency
   9172 		 * Tolerance Reporting (LTR)
   9173 		 */
   9174 		wm_platform_pm_pch_lpt(sc,
   9175 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9176 	}
   9177 
   9178 	/* Clear link partner's EEE ability */
   9179 	sc->eee_lp_ability = 0;
   9180 
   9181 	/* FEXTNVM6 K1-off workaround */
   9182 	if (sc->sc_type == WM_T_PCH_SPT) {
   9183 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9184 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9185 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9186 		else
   9187 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9188 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9189 	}
   9190 
   9191 	if (!link)
   9192 		return;
   9193 
   9194 	switch (sc->sc_type) {
   9195 	case WM_T_PCH2:
   9196 		wm_k1_workaround_lv(sc);
   9197 		/* FALLTHROUGH */
   9198 	case WM_T_PCH:
   9199 		if (sc->sc_phytype == WMPHY_82578)
   9200 			wm_link_stall_workaround_hv(sc);
   9201 		break;
   9202 	default:
   9203 		break;
   9204 	}
   9205 
   9206 	/* Enable/Disable EEE after link up */
   9207 	if (sc->sc_phytype > WMPHY_82579)
   9208 		wm_set_eee_pchlan(sc);
   9209 }
   9210 
   9211 /*
   9212  * wm_linkintr_tbi:
   9213  *
   9214  *	Helper; handle link interrupts for TBI mode.
   9215  */
   9216 static void
   9217 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9218 {
   9219 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9220 	uint32_t status;
   9221 
   9222 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9223 		__func__));
   9224 
   9225 	status = CSR_READ(sc, WMREG_STATUS);
   9226 	if (icr & ICR_LSC) {
   9227 		wm_check_for_link(sc);
   9228 		if (status & STATUS_LU) {
   9229 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9230 				device_xname(sc->sc_dev),
   9231 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9232 			/*
   9233 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9234 			 * so we should update sc->sc_ctrl
   9235 			 */
   9236 
   9237 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9238 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9239 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9240 			if (status & STATUS_FD)
   9241 				sc->sc_tctl |=
   9242 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9243 			else
   9244 				sc->sc_tctl |=
   9245 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9246 			if (sc->sc_ctrl & CTRL_TFCE)
   9247 				sc->sc_fcrtl |= FCRTL_XONE;
   9248 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9249 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9250 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9251 			sc->sc_tbi_linkup = 1;
   9252 			if_link_state_change(ifp, LINK_STATE_UP);
   9253 		} else {
   9254 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9255 				device_xname(sc->sc_dev)));
   9256 			sc->sc_tbi_linkup = 0;
   9257 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9258 		}
   9259 		/* Update LED */
   9260 		wm_tbi_serdes_set_linkled(sc);
   9261 	} else if (icr & ICR_RXSEQ)
   9262 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9263 			device_xname(sc->sc_dev)));
   9264 }
   9265 
   9266 /*
   9267  * wm_linkintr_serdes:
   9268  *
    9269  *	Helper; handle link interrupts for SERDES mode.
   9270  */
   9271 static void
   9272 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9273 {
   9274 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9275 	struct mii_data *mii = &sc->sc_mii;
   9276 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9277 	uint32_t pcs_adv, pcs_lpab, reg;
   9278 
   9279 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9280 		__func__));
   9281 
   9282 	if (icr & ICR_LSC) {
   9283 		/* Check PCS */
   9284 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9285 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9286 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9287 				device_xname(sc->sc_dev)));
   9288 			mii->mii_media_status |= IFM_ACTIVE;
   9289 			sc->sc_tbi_linkup = 1;
   9290 			if_link_state_change(ifp, LINK_STATE_UP);
   9291 		} else {
   9292 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9293 				device_xname(sc->sc_dev)));
   9294 			mii->mii_media_status |= IFM_NONE;
   9295 			sc->sc_tbi_linkup = 0;
   9296 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9297 			wm_tbi_serdes_set_linkled(sc);
   9298 			return;
   9299 		}
   9300 		mii->mii_media_active |= IFM_1000_SX;
   9301 		if ((reg & PCS_LSTS_FDX) != 0)
   9302 			mii->mii_media_active |= IFM_FDX;
   9303 		else
   9304 			mii->mii_media_active |= IFM_HDX;
   9305 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9306 			/* Check flow */
   9307 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9308 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9309 				DPRINTF(WM_DEBUG_LINK,
   9310 				    ("XXX LINKOK but not ACOMP\n"));
   9311 				return;
   9312 			}
   9313 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9314 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9315 			DPRINTF(WM_DEBUG_LINK,
   9316 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
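         			/*
         			 * Resolve flow control as in IEEE 802.3 Annex
         			 * 28B: symmetric pause when both sides advertise
         			 * SYM, otherwise asymmetric Tx or Rx pause when
         			 * exactly one side advertises SYM and both
         			 * advertise ASYM.
         			 */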
   9317 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9318 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9319 				mii->mii_media_active |= IFM_FLOW
   9320 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9321 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9322 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9323 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9324 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9325 				mii->mii_media_active |= IFM_FLOW
   9326 				    | IFM_ETH_TXPAUSE;
   9327 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9328 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9329 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9330 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9331 				mii->mii_media_active |= IFM_FLOW
   9332 				    | IFM_ETH_RXPAUSE;
   9333 		}
   9334 		/* Update LED */
   9335 		wm_tbi_serdes_set_linkled(sc);
   9336 	} else
   9337 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9338 		    device_xname(sc->sc_dev)));
   9339 }
   9340 
   9341 /*
   9342  * wm_linkintr:
   9343  *
   9344  *	Helper; handle link interrupts.
   9345  */
   9346 static void
   9347 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9348 {
   9349 
   9350 	KASSERT(WM_CORE_LOCKED(sc));
   9351 
   9352 	if (sc->sc_flags & WM_F_HAS_MII)
   9353 		wm_linkintr_gmii(sc, icr);
   9354 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9355 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9356 		wm_linkintr_serdes(sc, icr);
   9357 	else
   9358 		wm_linkintr_tbi(sc, icr);
   9359 }
   9360 
   9361 /*
   9362  * wm_intr_legacy:
   9363  *
   9364  *	Interrupt service routine for INTx and MSI.
   9365  */
   9366 static int
   9367 wm_intr_legacy(void *arg)
   9368 {
   9369 	struct wm_softc *sc = arg;
   9370 	struct wm_queue *wmq = &sc->sc_queue[0];
   9371 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9372 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9373 	uint32_t icr, rndval = 0;
   9374 	int handled = 0;
   9375 
   9376 	while (1 /* CONSTCOND */) {
   9377 		icr = CSR_READ(sc, WMREG_ICR);
   9378 		if ((icr & sc->sc_icr) == 0)
   9379 			break;
   9380 		if (handled == 0)
   9381 			DPRINTF(WM_DEBUG_TX,
    9382 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9383 		if (rndval == 0)
   9384 			rndval = icr;
   9385 
   9386 		mutex_enter(rxq->rxq_lock);
   9387 
   9388 		if (rxq->rxq_stopping) {
   9389 			mutex_exit(rxq->rxq_lock);
   9390 			break;
   9391 		}
   9392 
   9393 		handled = 1;
   9394 
   9395 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9396 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9397 			DPRINTF(WM_DEBUG_RX,
   9398 			    ("%s: RX: got Rx intr 0x%08x\n",
   9399 				device_xname(sc->sc_dev),
   9400 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9401 			WM_Q_EVCNT_INCR(rxq, intr);
   9402 		}
   9403 #endif
    9404 		/*
    9405 		 * wm_rxeof() does *not* call upper layer functions directly,
    9406 		 * as if_percpuq_enqueue() just calls softint_schedule().
    9407 		 * So we can call wm_rxeof() in interrupt context.
    9408 		 */
   9409 		wm_rxeof(rxq, UINT_MAX);
   9410 
   9411 		mutex_exit(rxq->rxq_lock);
   9412 		mutex_enter(txq->txq_lock);
   9413 
   9414 		if (txq->txq_stopping) {
   9415 			mutex_exit(txq->txq_lock);
   9416 			break;
   9417 		}
   9418 
   9419 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9420 		if (icr & ICR_TXDW) {
   9421 			DPRINTF(WM_DEBUG_TX,
   9422 			    ("%s: TX: got TXDW interrupt\n",
   9423 				device_xname(sc->sc_dev)));
   9424 			WM_Q_EVCNT_INCR(txq, txdw);
   9425 		}
   9426 #endif
   9427 		wm_txeof(txq, UINT_MAX);
   9428 
   9429 		mutex_exit(txq->txq_lock);
   9430 		WM_CORE_LOCK(sc);
   9431 
   9432 		if (sc->sc_core_stopping) {
   9433 			WM_CORE_UNLOCK(sc);
   9434 			break;
   9435 		}
   9436 
   9437 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9438 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9439 			wm_linkintr(sc, icr);
   9440 		}
   9441 
   9442 		WM_CORE_UNLOCK(sc);
   9443 
   9444 		if (icr & ICR_RXO) {
   9445 #if defined(WM_DEBUG)
   9446 			log(LOG_WARNING, "%s: Receive overrun\n",
   9447 			    device_xname(sc->sc_dev));
   9448 #endif /* defined(WM_DEBUG) */
   9449 		}
   9450 	}
   9451 
   9452 	rnd_add_uint32(&sc->rnd_source, rndval);
   9453 
   9454 	if (handled) {
   9455 		/* Try to get more packets going. */
   9456 		softint_schedule(wmq->wmq_si);
   9457 	}
   9458 
   9459 	return handled;
   9460 }
   9461 
   9462 static inline void
   9463 wm_txrxintr_disable(struct wm_queue *wmq)
   9464 {
   9465 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9466 
   9467 	if (sc->sc_type == WM_T_82574)
   9468 		CSR_WRITE(sc, WMREG_IMC,
   9469 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9470 	else if (sc->sc_type == WM_T_82575)
   9471 		CSR_WRITE(sc, WMREG_EIMC,
   9472 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9473 	else
   9474 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9475 }
   9476 
   9477 static inline void
   9478 wm_txrxintr_enable(struct wm_queue *wmq)
   9479 {
   9480 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9481 
   9482 	wm_itrs_calculate(sc, wmq);
   9483 
    9484 	/*
    9485 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9486 	 * There is no need to care whether RXQ(0) or RXQ(1) enables
    9487 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9488 	 * while its wm_handle_queue(wmq) is running.
    9489 	 */
   9490 	if (sc->sc_type == WM_T_82574)
   9491 		CSR_WRITE(sc, WMREG_IMS,
   9492 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9493 	else if (sc->sc_type == WM_T_82575)
   9494 		CSR_WRITE(sc, WMREG_EIMS,
   9495 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9496 	else
   9497 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9498 }
   9499 
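/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of one queue
 *	for MSI-X.
 */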
   9500 static int
   9501 wm_txrxintr_msix(void *arg)
   9502 {
   9503 	struct wm_queue *wmq = arg;
   9504 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9505 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9506 	struct wm_softc *sc = txq->txq_sc;
   9507 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9508 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9509 	bool txmore;
   9510 	bool rxmore;
   9511 
   9512 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9513 
   9514 	DPRINTF(WM_DEBUG_TX,
   9515 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9516 
   9517 	wm_txrxintr_disable(wmq);
   9518 
   9519 	mutex_enter(txq->txq_lock);
   9520 
   9521 	if (txq->txq_stopping) {
   9522 		mutex_exit(txq->txq_lock);
   9523 		return 0;
   9524 	}
   9525 
   9526 	WM_Q_EVCNT_INCR(txq, txdw);
   9527 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9529 	mutex_exit(txq->txq_lock);
   9530 
   9531 	DPRINTF(WM_DEBUG_RX,
   9532 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9533 	mutex_enter(rxq->rxq_lock);
   9534 
   9535 	if (rxq->rxq_stopping) {
   9536 		mutex_exit(rxq->rxq_lock);
   9537 		return 0;
   9538 	}
   9539 
   9540 	WM_Q_EVCNT_INCR(rxq, intr);
   9541 	rxmore = wm_rxeof(rxq, rxlimit);
   9542 	mutex_exit(rxq->rxq_lock);
   9543 
   9544 	wm_itrs_writereg(sc, wmq);
   9545 
   9546 	if (txmore || rxmore)
   9547 		softint_schedule(wmq->wmq_si);
   9548 	else
   9549 		wm_txrxintr_enable(wmq);
   9550 
   9551 	return 1;
   9552 }
   9553 
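/*
 * wm_handle_queue:
 *
 *	Softint handler for one queue. Process the Tx/Rx work left over
 *	by the interrupt handler and re-enable the queue's interrupts
 *	when no more work remains.
 */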
   9554 static void
   9555 wm_handle_queue(void *arg)
   9556 {
   9557 	struct wm_queue *wmq = arg;
   9558 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9559 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9560 	struct wm_softc *sc = txq->txq_sc;
   9561 	u_int txlimit = sc->sc_tx_process_limit;
   9562 	u_int rxlimit = sc->sc_rx_process_limit;
   9563 	bool txmore;
   9564 	bool rxmore;
   9565 
   9566 	mutex_enter(txq->txq_lock);
   9567 	if (txq->txq_stopping) {
   9568 		mutex_exit(txq->txq_lock);
   9569 		return;
   9570 	}
   9571 	txmore = wm_txeof(txq, txlimit);
   9572 	wm_deferred_start_locked(txq);
   9573 	mutex_exit(txq->txq_lock);
   9574 
   9575 	mutex_enter(rxq->rxq_lock);
   9576 	if (rxq->rxq_stopping) {
   9577 		mutex_exit(rxq->rxq_lock);
   9578 		return;
   9579 	}
   9580 	WM_Q_EVCNT_INCR(rxq, defer);
   9581 	rxmore = wm_rxeof(rxq, rxlimit);
   9582 	mutex_exit(rxq->rxq_lock);
   9583 
   9584 	if (txmore || rxmore)
   9585 		softint_schedule(wmq->wmq_si);
   9586 	else
   9587 		wm_txrxintr_enable(wmq);
   9588 }
   9589 
   9590 /*
   9591  * wm_linkintr_msix:
   9592  *
   9593  *	Interrupt service routine for link status change for MSI-X.
   9594  */
   9595 static int
   9596 wm_linkintr_msix(void *arg)
   9597 {
   9598 	struct wm_softc *sc = arg;
   9599 	uint32_t reg;
	bool has_rxo = false;
   9601 
   9602 	DPRINTF(WM_DEBUG_LINK,
   9603 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9604 
   9605 	reg = CSR_READ(sc, WMREG_ICR);
   9606 	WM_CORE_LOCK(sc);
   9607 	if (sc->sc_core_stopping)
   9608 		goto out;
   9609 
   9610 	if ((reg & ICR_LSC) != 0) {
   9611 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9612 		wm_linkintr(sc, ICR_LSC);
   9613 	}
   9614 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
	 * interrupts by writing to WMREG_ICS to process received packets.
	 */
   9623 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9624 #if defined(WM_DEBUG)
   9625 		log(LOG_WARNING, "%s: Receive overrun\n",
   9626 		    device_xname(sc->sc_dev));
   9627 #endif /* defined(WM_DEBUG) */
   9628 
   9629 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so poll ICR_OTHER the same way as the
		 * Tx/Rx interrupts. ICR_OTHER is re-enabled at the end of
		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
		 * and ICR_RXQ(1) interrupts.
		 */
   9637 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9638 
   9639 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9640 	}
   9641 
   9644 out:
   9645 	WM_CORE_UNLOCK(sc);
   9646 
   9647 	if (sc->sc_type == WM_T_82574) {
   9648 		if (!has_rxo)
   9649 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9650 		else
   9651 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9652 	} else if (sc->sc_type == WM_T_82575)
   9653 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9654 	else
   9655 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9656 
   9657 	return 1;
   9658 }
   9659 
   9660 /*
   9661  * Media related.
   9662  * GMII, SGMII, TBI (and SERDES)
   9663  */
   9664 
   9665 /* Common */
   9666 
   9667 /*
   9668  * wm_tbi_serdes_set_linkled:
   9669  *
   9670  *	Update the link LED on TBI and SERDES devices.
   9671  */
   9672 static void
   9673 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9674 {
   9675 
   9676 	if (sc->sc_tbi_linkup)
   9677 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9678 	else
   9679 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9680 
   9681 	/* 82540 or newer devices are active low */
   9682 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9683 
   9684 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9685 }
   9686 
   9687 /* GMII related */
   9688 
   9689 /*
   9690  * wm_gmii_reset:
   9691  *
   9692  *	Reset the PHY.
   9693  */
   9694 static void
   9695 wm_gmii_reset(struct wm_softc *sc)
   9696 {
   9697 	uint32_t reg;
   9698 	int rv;
   9699 
   9700 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9701 		device_xname(sc->sc_dev), __func__));
   9702 
   9703 	rv = sc->phy.acquire(sc);
   9704 	if (rv != 0) {
   9705 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9706 		    __func__);
   9707 		return;
   9708 	}
   9709 
   9710 	switch (sc->sc_type) {
   9711 	case WM_T_82542_2_0:
   9712 	case WM_T_82542_2_1:
   9713 		/* null */
   9714 		break;
   9715 	case WM_T_82543:
   9716 		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's speed and duplex
		 * configuration. In addition, we need to perform a hardware
		 * reset on the PHY to take it out of reset.
   9721 		 */
   9722 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9724 
   9725 		/* The PHY reset pin is active-low. */
   9726 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9727 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9728 		    CTRL_EXT_SWDPIN(4));
   9729 		reg |= CTRL_EXT_SWDPIO(4);
   9730 
   9731 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9732 		CSR_WRITE_FLUSH(sc);
   9733 		delay(10*1000);
   9734 
   9735 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9736 		CSR_WRITE_FLUSH(sc);
   9737 		delay(150);
   9738 #if 0
   9739 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9740 #endif
   9741 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9742 		break;
   9743 	case WM_T_82544:	/* Reset 10000us */
   9744 	case WM_T_82540:
   9745 	case WM_T_82545:
   9746 	case WM_T_82545_3:
   9747 	case WM_T_82546:
   9748 	case WM_T_82546_3:
   9749 	case WM_T_82541:
   9750 	case WM_T_82541_2:
   9751 	case WM_T_82547:
   9752 	case WM_T_82547_2:
   9753 	case WM_T_82571:	/* Reset 100us */
   9754 	case WM_T_82572:
   9755 	case WM_T_82573:
   9756 	case WM_T_82574:
   9757 	case WM_T_82575:
   9758 	case WM_T_82576:
   9759 	case WM_T_82580:
   9760 	case WM_T_I350:
   9761 	case WM_T_I354:
   9762 	case WM_T_I210:
   9763 	case WM_T_I211:
   9764 	case WM_T_82583:
   9765 	case WM_T_80003:
   9766 		/* Generic reset */
   9767 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9768 		CSR_WRITE_FLUSH(sc);
   9769 		delay(20000);
   9770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9771 		CSR_WRITE_FLUSH(sc);
   9772 		delay(20000);
   9773 
   9774 		if ((sc->sc_type == WM_T_82541)
   9775 		    || (sc->sc_type == WM_T_82541_2)
   9776 		    || (sc->sc_type == WM_T_82547)
   9777 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
			/* XXX add code to set LED after PHY reset */
   9780 		}
   9781 		break;
   9782 	case WM_T_ICH8:
   9783 	case WM_T_ICH9:
   9784 	case WM_T_ICH10:
   9785 	case WM_T_PCH:
   9786 	case WM_T_PCH2:
   9787 	case WM_T_PCH_LPT:
   9788 	case WM_T_PCH_SPT:
   9789 	case WM_T_PCH_CNP:
   9790 		/* Generic reset */
   9791 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9792 		CSR_WRITE_FLUSH(sc);
   9793 		delay(100);
   9794 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9795 		CSR_WRITE_FLUSH(sc);
   9796 		delay(150);
   9797 		break;
   9798 	default:
   9799 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9800 		    __func__);
   9801 		break;
   9802 	}
   9803 
   9804 	sc->phy.release(sc);
   9805 
   9806 	/* get_cfg_done */
   9807 	wm_get_cfg_done(sc);
   9808 
   9809 	/* Extra setup */
   9810 	switch (sc->sc_type) {
   9811 	case WM_T_82542_2_0:
   9812 	case WM_T_82542_2_1:
   9813 	case WM_T_82543:
   9814 	case WM_T_82544:
   9815 	case WM_T_82540:
   9816 	case WM_T_82545:
   9817 	case WM_T_82545_3:
   9818 	case WM_T_82546:
   9819 	case WM_T_82546_3:
   9820 	case WM_T_82541_2:
   9821 	case WM_T_82547_2:
   9822 	case WM_T_82571:
   9823 	case WM_T_82572:
   9824 	case WM_T_82573:
   9825 	case WM_T_82574:
   9826 	case WM_T_82583:
   9827 	case WM_T_82575:
   9828 	case WM_T_82576:
   9829 	case WM_T_82580:
   9830 	case WM_T_I350:
   9831 	case WM_T_I354:
   9832 	case WM_T_I210:
   9833 	case WM_T_I211:
   9834 	case WM_T_80003:
   9835 		/* Null */
   9836 		break;
   9837 	case WM_T_82541:
   9838 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9840 		break;
   9841 	case WM_T_ICH8:
   9842 	case WM_T_ICH9:
   9843 	case WM_T_ICH10:
   9844 	case WM_T_PCH:
   9845 	case WM_T_PCH2:
   9846 	case WM_T_PCH_LPT:
   9847 	case WM_T_PCH_SPT:
   9848 	case WM_T_PCH_CNP:
   9849 		wm_phy_post_reset(sc);
   9850 		break;
   9851 	default:
   9852 		panic("%s: unknown type\n", __func__);
   9853 		break;
   9854 	}
   9855 }
   9856 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected. To select the correct read/write functions, the PCI ID or
 * MAC type is required, without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. It might still not be perfect because some entries are
 * missing from the comparison, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9875 static void
   9876 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9877     uint16_t phy_model)
   9878 {
   9879 	device_t dev = sc->sc_dev;
   9880 	struct mii_data *mii = &sc->sc_mii;
   9881 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9882 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9883 	mii_readreg_t new_readreg;
   9884 	mii_writereg_t new_writereg;
   9885 
   9886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9887 		device_xname(sc->sc_dev), __func__));
   9888 
   9889 	if (mii->mii_readreg == NULL) {
   9890 		/*
   9891 		 *  This is the first call of this function. For ICH and PCH
   9892 		 * variants, it's difficult to determine the PHY access method
   9893 		 * by sc_type, so use the PCI product ID for some devices.
   9894 		 */
   9895 
   9896 		switch (sc->sc_pcidevid) {
   9897 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9898 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9899 			/* 82577 */
   9900 			new_phytype = WMPHY_82577;
   9901 			break;
   9902 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9903 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9904 			/* 82578 */
   9905 			new_phytype = WMPHY_82578;
   9906 			break;
   9907 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9908 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9909 			/* 82579 */
   9910 			new_phytype = WMPHY_82579;
   9911 			break;
   9912 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9913 		case PCI_PRODUCT_INTEL_82801I_BM:
   9914 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9915 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9916 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9917 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9918 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9919 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9920 			/* ICH8, 9, 10 with 82567 */
   9921 			new_phytype = WMPHY_BM;
   9922 			break;
   9923 		default:
   9924 			break;
   9925 		}
   9926 	} else {
   9927 		/* It's not the first call. Use PHY OUI and model */
   9928 		switch (phy_oui) {
   9929 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9930 			switch (phy_model) {
   9931 			case 0x0004: /* XXX */
   9932 				new_phytype = WMPHY_82578;
   9933 				break;
   9934 			default:
   9935 				break;
   9936 			}
   9937 			break;
   9938 		case MII_OUI_xxMARVELL:
   9939 			switch (phy_model) {
   9940 			case MII_MODEL_xxMARVELL_I210:
   9941 				new_phytype = WMPHY_I210;
   9942 				break;
   9943 			case MII_MODEL_xxMARVELL_E1011:
   9944 			case MII_MODEL_xxMARVELL_E1000_3:
   9945 			case MII_MODEL_xxMARVELL_E1000_5:
   9946 			case MII_MODEL_xxMARVELL_E1112:
   9947 				new_phytype = WMPHY_M88;
   9948 				break;
   9949 			case MII_MODEL_xxMARVELL_E1149:
   9950 				new_phytype = WMPHY_BM;
   9951 				break;
   9952 			case MII_MODEL_xxMARVELL_E1111:
   9953 			case MII_MODEL_xxMARVELL_I347:
   9954 			case MII_MODEL_xxMARVELL_E1512:
   9955 			case MII_MODEL_xxMARVELL_E1340M:
   9956 			case MII_MODEL_xxMARVELL_E1543:
   9957 				new_phytype = WMPHY_M88;
   9958 				break;
   9959 			case MII_MODEL_xxMARVELL_I82563:
   9960 				new_phytype = WMPHY_GG82563;
   9961 				break;
   9962 			default:
   9963 				break;
   9964 			}
   9965 			break;
   9966 		case MII_OUI_INTEL:
   9967 			switch (phy_model) {
   9968 			case MII_MODEL_INTEL_I82577:
   9969 				new_phytype = WMPHY_82577;
   9970 				break;
   9971 			case MII_MODEL_INTEL_I82579:
   9972 				new_phytype = WMPHY_82579;
   9973 				break;
   9974 			case MII_MODEL_INTEL_I217:
   9975 				new_phytype = WMPHY_I217;
   9976 				break;
   9977 			case MII_MODEL_INTEL_I82580:
   9978 			case MII_MODEL_INTEL_I350:
   9979 				new_phytype = WMPHY_82580;
   9980 				break;
   9981 			default:
   9982 				break;
   9983 			}
   9984 			break;
   9985 		case MII_OUI_yyINTEL:
   9986 			switch (phy_model) {
   9987 			case MII_MODEL_yyINTEL_I82562G:
   9988 			case MII_MODEL_yyINTEL_I82562EM:
   9989 			case MII_MODEL_yyINTEL_I82562ET:
   9990 				new_phytype = WMPHY_IFE;
   9991 				break;
   9992 			case MII_MODEL_yyINTEL_IGP01E1000:
   9993 				new_phytype = WMPHY_IGP;
   9994 				break;
   9995 			case MII_MODEL_yyINTEL_I82566:
   9996 				new_phytype = WMPHY_IGP_3;
   9997 				break;
   9998 			default:
   9999 				break;
   10000 			}
   10001 			break;
   10002 		default:
   10003 			break;
   10004 		}
   10005 		if (new_phytype == WMPHY_UNKNOWN)
   10006 			aprint_verbose_dev(dev,
   10007 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10008 			    __func__, phy_oui, phy_model);
   10009 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
			    "was incorrect. PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   10016 	}
   10017 
   10018 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10019 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10020 		/* SGMII */
   10021 		new_readreg = wm_sgmii_readreg;
   10022 		new_writereg = wm_sgmii_writereg;
   10023 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10024 		/* BM2 (phyaddr == 1) */
   10025 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10026 		    && (new_phytype != WMPHY_BM)
   10027 		    && (new_phytype != WMPHY_UNKNOWN))
   10028 			doubt_phytype = new_phytype;
   10029 		new_phytype = WMPHY_BM;
   10030 		new_readreg = wm_gmii_bm_readreg;
   10031 		new_writereg = wm_gmii_bm_writereg;
   10032 	} else if (sc->sc_type >= WM_T_PCH) {
   10033 		/* All PCH* use _hv_ */
   10034 		new_readreg = wm_gmii_hv_readreg;
   10035 		new_writereg = wm_gmii_hv_writereg;
   10036 	} else if (sc->sc_type >= WM_T_ICH8) {
   10037 		/* non-82567 ICH8, 9 and 10 */
   10038 		new_readreg = wm_gmii_i82544_readreg;
   10039 		new_writereg = wm_gmii_i82544_writereg;
   10040 	} else if (sc->sc_type >= WM_T_80003) {
   10041 		/* 80003 */
   10042 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10043 		    && (new_phytype != WMPHY_GG82563)
   10044 		    && (new_phytype != WMPHY_UNKNOWN))
   10045 			doubt_phytype = new_phytype;
   10046 		new_phytype = WMPHY_GG82563;
   10047 		new_readreg = wm_gmii_i80003_readreg;
   10048 		new_writereg = wm_gmii_i80003_writereg;
   10049 	} else if (sc->sc_type >= WM_T_I210) {
   10050 		/* I210 and I211 */
   10051 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10052 		    && (new_phytype != WMPHY_I210)
   10053 		    && (new_phytype != WMPHY_UNKNOWN))
   10054 			doubt_phytype = new_phytype;
   10055 		new_phytype = WMPHY_I210;
   10056 		new_readreg = wm_gmii_gs40g_readreg;
   10057 		new_writereg = wm_gmii_gs40g_writereg;
   10058 	} else if (sc->sc_type >= WM_T_82580) {
   10059 		/* 82580, I350 and I354 */
   10060 		new_readreg = wm_gmii_82580_readreg;
   10061 		new_writereg = wm_gmii_82580_writereg;
   10062 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10064 		new_readreg = wm_gmii_i82544_readreg;
   10065 		new_writereg = wm_gmii_i82544_writereg;
   10066 	} else {
   10067 		new_readreg = wm_gmii_i82543_readreg;
   10068 		new_writereg = wm_gmii_i82543_writereg;
   10069 	}
   10070 
   10071 	if (new_phytype == WMPHY_BM) {
   10072 		/* All BM use _bm_ */
   10073 		new_readreg = wm_gmii_bm_readreg;
   10074 		new_writereg = wm_gmii_bm_writereg;
   10075 	}
   10076 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10077 		/* All PCH* use _hv_ */
   10078 		new_readreg = wm_gmii_hv_readreg;
   10079 		new_writereg = wm_gmii_hv_writereg;
   10080 	}
   10081 
   10082 	/* Diag output */
   10083 	if (doubt_phytype != WMPHY_UNKNOWN)
   10084 		aprint_error_dev(dev, "Assumed new PHY type was "
   10085 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10086 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   10092 
   10093 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10094 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10095 
   10096 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10097 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10098 		    "function was incorrect.\n");
   10099 
   10100 	/* Update now */
   10101 	sc->sc_phytype = new_phytype;
   10102 	mii->mii_readreg = new_readreg;
   10103 	mii->mii_writereg = new_writereg;
   10104 	if (new_readreg == wm_gmii_hv_readreg) {
   10105 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10106 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10107 	} else if (new_readreg == wm_sgmii_readreg) {
   10108 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10109 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10110 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10111 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10112 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10113 	}
   10114 }
   10115 
   10116 /*
   10117  * wm_get_phy_id_82575:
   10118  *
 *	Return the PHY ID, or -1 on failure.
   10120  */
   10121 static int
   10122 wm_get_phy_id_82575(struct wm_softc *sc)
   10123 {
   10124 	uint32_t reg;
   10125 	int phyid = -1;
   10126 
   10127 	/* XXX */
   10128 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10129 		return -1;
   10130 
   10131 	if (wm_sgmii_uses_mdio(sc)) {
   10132 		switch (sc->sc_type) {
   10133 		case WM_T_82575:
   10134 		case WM_T_82576:
   10135 			reg = CSR_READ(sc, WMREG_MDIC);
   10136 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10137 			break;
   10138 		case WM_T_82580:
   10139 		case WM_T_I350:
   10140 		case WM_T_I354:
   10141 		case WM_T_I210:
   10142 		case WM_T_I211:
   10143 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10144 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10145 			break;
   10146 		default:
   10147 			return -1;
   10148 		}
   10149 	}
   10150 
   10151 	return phyid;
   10152 }
   10153 
   10154 
   10155 /*
   10156  * wm_gmii_mediainit:
   10157  *
   10158  *	Initialize media for use on 1000BASE-T devices.
   10159  */
   10160 static void
   10161 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10162 {
   10163 	device_t dev = sc->sc_dev;
   10164 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10165 	struct mii_data *mii = &sc->sc_mii;
   10166 	uint32_t reg;
   10167 
   10168 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10169 		device_xname(sc->sc_dev), __func__));
   10170 
   10171 	/* We have GMII. */
   10172 	sc->sc_flags |= WM_F_HAS_MII;
   10173 
   10174 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10176 	else
   10177 		sc->sc_tipg = TIPG_1000T_DFLT;
   10178 
   10179 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10180 	if ((sc->sc_type == WM_T_82580)
   10181 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10182 	    || (sc->sc_type == WM_T_I211)) {
   10183 		reg = CSR_READ(sc, WMREG_PHPM);
   10184 		reg &= ~PHPM_GO_LINK_D;
   10185 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10186 	}
   10187 
   10188 	/*
   10189 	 * Let the chip set speed/duplex on its own based on
   10190 	 * signals from the PHY.
   10191 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10192 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10193 	 */
   10194 	sc->sc_ctrl |= CTRL_SLU;
   10195 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10196 
   10197 	/* Initialize our media structures and probe the GMII. */
   10198 	mii->mii_ifp = ifp;
   10199 
   10200 	mii->mii_statchg = wm_gmii_statchg;
   10201 
   10202 	/* get PHY control from SMBus to PCIe */
   10203 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10204 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10205 	    || (sc->sc_type == WM_T_PCH_CNP))
   10206 		wm_init_phy_workarounds_pchlan(sc);
   10207 
   10208 	wm_gmii_reset(sc);
   10209 
   10210 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10211 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10212 	    wm_gmii_mediastatus);
   10213 
   10214 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10215 	    || (sc->sc_type == WM_T_82580)
   10216 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10217 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10218 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10219 			/* Attach only one port */
   10220 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10221 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10222 		} else {
   10223 			int i, id;
   10224 			uint32_t ctrl_ext;
   10225 
   10226 			id = wm_get_phy_id_82575(sc);
   10227 			if (id != -1) {
   10228 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10229 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10230 			}
   10231 			if ((id == -1)
   10232 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   10234 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10235 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10236 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10237 				CSR_WRITE_FLUSH(sc);
   10238 				delay(300*1000); /* XXX too long */
   10239 
				/* Try PHY addresses 1 through 7 */
   10241 				for (i = 1; i < 8; i++)
   10242 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10243 					    0xffffffff, i, MII_OFFSET_ANY,
   10244 					    MIIF_DOPAUSE);
   10245 
				/* Restore the previous SFP cage power state */
   10247 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10248 			}
   10249 		}
   10250 	} else
   10251 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10252 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10253 
   10254 	/*
   10255 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10256 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10257 	 */
   10258 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10259 		|| (sc->sc_type == WM_T_PCH_SPT)
   10260 		|| (sc->sc_type == WM_T_PCH_CNP))
   10261 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10262 		wm_set_mdio_slow_mode_hv(sc);
   10263 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10264 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10265 	}
   10266 
   10267 	/*
   10268 	 * (For ICH8 variants)
   10269 	 * If PHY detection failed, use BM's r/w function and retry.
   10270 	 */
   10271 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10272 		/* if failed, retry with *_bm_* */
   10273 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10274 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10275 		    sc->sc_phytype);
   10276 		sc->sc_phytype = WMPHY_BM;
   10277 		mii->mii_readreg = wm_gmii_bm_readreg;
   10278 		mii->mii_writereg = wm_gmii_bm_writereg;
   10279 
   10280 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10281 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10282 	}
   10283 
   10284 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10286 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10287 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10288 		sc->sc_phytype = WMPHY_NONE;
   10289 	} else {
   10290 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10291 
		/*
		 * A PHY was found. Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
		 */
   10296 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10297 		    child->mii_mpd_model);
   10298 
   10299 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10300 	}
   10301 }
   10302 
   10303 /*
   10304  * wm_gmii_mediachange:	[ifmedia interface function]
   10305  *
   10306  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10307  */
   10308 static int
   10309 wm_gmii_mediachange(struct ifnet *ifp)
   10310 {
   10311 	struct wm_softc *sc = ifp->if_softc;
   10312 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10313 	int rc;
   10314 
   10315 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10316 		device_xname(sc->sc_dev), __func__));
   10317 	if ((ifp->if_flags & IFF_UP) == 0)
   10318 		return 0;
   10319 
   10320 	/* Disable D0 LPLU. */
   10321 	wm_lplu_d0_disable(sc);
   10322 
   10323 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10324 	sc->sc_ctrl |= CTRL_SLU;
   10325 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10326 	    || (sc->sc_type > WM_T_82543)) {
   10327 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10328 	} else {
   10329 		sc->sc_ctrl &= ~CTRL_ASDE;
   10330 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10331 		if (ife->ifm_media & IFM_FDX)
   10332 			sc->sc_ctrl |= CTRL_FD;
   10333 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10334 		case IFM_10_T:
   10335 			sc->sc_ctrl |= CTRL_SPEED_10;
   10336 			break;
   10337 		case IFM_100_TX:
   10338 			sc->sc_ctrl |= CTRL_SPEED_100;
   10339 			break;
   10340 		case IFM_1000_T:
   10341 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10342 			break;
   10343 		case IFM_NONE:
   10344 			/* There is no specific setting for IFM_NONE */
   10345 			break;
   10346 		default:
   10347 			panic("wm_gmii_mediachange: bad media 0x%x",
   10348 			    ife->ifm_media);
   10349 		}
   10350 	}
   10351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10352 	CSR_WRITE_FLUSH(sc);
   10353 	if (sc->sc_type <= WM_T_82543)
   10354 		wm_gmii_reset(sc);
   10355 
   10356 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10357 		return 0;
   10358 	return rc;
   10359 }
   10360 
   10361 /*
   10362  * wm_gmii_mediastatus:	[ifmedia interface function]
   10363  *
   10364  *	Get the current interface media status on a 1000BASE-T device.
   10365  */
   10366 static void
   10367 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10368 {
   10369 	struct wm_softc *sc = ifp->if_softc;
   10370 
   10371 	ether_mediastatus(ifp, ifmr);
   10372 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10373 	    | sc->sc_flowflags;
   10374 }
   10375 
   10376 #define	MDI_IO		CTRL_SWDPIN(2)
   10377 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10378 #define	MDI_CLK		CTRL_SWDPIN(3)
   10379 
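/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the given value, MSB first, onto the i82543 MDIO pin,
 *	toggling MDI_CLK for each bit.
 */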
   10380 static void
   10381 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10382 {
   10383 	uint32_t i, v;
   10384 
   10385 	v = CSR_READ(sc, WMREG_CTRL);
   10386 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10387 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10388 
   10389 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10390 		if (data & i)
   10391 			v |= MDI_IO;
   10392 		else
   10393 			v &= ~MDI_IO;
   10394 		CSR_WRITE(sc, WMREG_CTRL, v);
   10395 		CSR_WRITE_FLUSH(sc);
   10396 		delay(10);
   10397 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10398 		CSR_WRITE_FLUSH(sc);
   10399 		delay(10);
   10400 		CSR_WRITE(sc, WMREG_CTRL, v);
   10401 		CSR_WRITE_FLUSH(sc);
   10402 		delay(10);
   10403 	}
   10404 }
   10405 
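/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in from the i82543 MDIO pin, including
 *	the surrounding turnaround clock cycles.
 */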
   10406 static uint16_t
   10407 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10408 {
   10409 	uint32_t v, i;
   10410 	uint16_t data = 0;
   10411 
   10412 	v = CSR_READ(sc, WMREG_CTRL);
   10413 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10414 	v |= CTRL_SWDPIO(3);
   10415 
   10416 	CSR_WRITE(sc, WMREG_CTRL, v);
   10417 	CSR_WRITE_FLUSH(sc);
   10418 	delay(10);
   10419 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10420 	CSR_WRITE_FLUSH(sc);
   10421 	delay(10);
   10422 	CSR_WRITE(sc, WMREG_CTRL, v);
   10423 	CSR_WRITE_FLUSH(sc);
   10424 	delay(10);
   10425 
   10426 	for (i = 0; i < 16; i++) {
   10427 		data <<= 1;
   10428 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10429 		CSR_WRITE_FLUSH(sc);
   10430 		delay(10);
   10431 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10432 			data |= 1;
   10433 		CSR_WRITE(sc, WMREG_CTRL, v);
   10434 		CSR_WRITE_FLUSH(sc);
   10435 		delay(10);
   10436 	}
   10437 
   10438 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10439 	CSR_WRITE_FLUSH(sc);
   10440 	delay(10);
   10441 	CSR_WRITE(sc, WMREG_CTRL, v);
   10442 	CSR_WRITE_FLUSH(sc);
   10443 	delay(10);
   10444 
   10445 	return data;
   10446 }
   10447 
   10448 #undef MDI_IO
   10449 #undef MDI_DIR
   10450 #undef MDI_CLK
   10451 
   10452 /*
   10453  * wm_gmii_i82543_readreg:	[mii interface function]
   10454  *
   10455  *	Read a PHY register on the GMII (i82543 version).
   10456  */
   10457 static int
   10458 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10459 {
   10460 	struct wm_softc *sc = device_private(dev);
   10461 
   10462 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10463 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10464 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10465 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10466 
   10467 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10468 		device_xname(dev), phy, reg, *val));
   10469 
   10470 	return 0;
   10471 }
   10472 
   10473 /*
   10474  * wm_gmii_i82543_writereg:	[mii interface function]
   10475  *
   10476  *	Write a PHY register on the GMII (i82543 version).
   10477  */
   10478 static int
   10479 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10480 {
   10481 	struct wm_softc *sc = device_private(dev);
   10482 
   10483 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10484 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10485 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10486 	    (MII_COMMAND_START << 30), 32);
   10487 
   10488 	return 0;
   10489 }
   10490 
   10491 /*
   10492  * wm_gmii_mdic_readreg:	[mii interface function]
   10493  *
   10494  *	Read a PHY register on the GMII.
   10495  */
   10496 static int
   10497 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10498 {
   10499 	struct wm_softc *sc = device_private(dev);
   10500 	uint32_t mdic = 0;
   10501 	int i;
   10502 
   10503 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10504 	    && (reg > MII_ADDRMASK)) {
   10505 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10506 		    __func__, sc->sc_phytype, reg);
   10507 		reg &= MII_ADDRMASK;
   10508 	}
   10509 
   10510 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10511 	    MDIC_REGADD(reg));
   10512 
   10513 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10514 		delay(50);
   10515 		mdic = CSR_READ(sc, WMREG_MDIC);
   10516 		if (mdic & MDIC_READY)
   10517 			break;
   10518 	}
   10519 
   10520 	if ((mdic & MDIC_READY) == 0) {
   10521 		DPRINTF(WM_DEBUG_GMII,
   10522 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10523 			device_xname(dev), phy, reg));
   10524 		return ETIMEDOUT;
   10525 	} else if (mdic & MDIC_E) {
   10526 		/* This is normal if no PHY is present. */
		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
   10529 		return -1;
   10530 	} else
   10531 		*val = MDIC_DATA(mdic);
   10532 
   10533 	/*
   10534 	 * Allow some time after each MDIC transaction to avoid
   10535 	 * reading duplicate data in the next MDIC transaction.
   10536 	 */
   10537 	if (sc->sc_type == WM_T_PCH2)
   10538 		delay(100);
   10539 
   10540 	return 0;
   10541 }
   10542 
   10543 /*
   10544  * wm_gmii_mdic_writereg:	[mii interface function]
   10545  *
   10546  *	Write a PHY register on the GMII.
   10547  */
   10548 static int
   10549 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10550 {
   10551 	struct wm_softc *sc = device_private(dev);
   10552 	uint32_t mdic = 0;
   10553 	int i;
   10554 
   10555 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10556 	    && (reg > MII_ADDRMASK)) {
   10557 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10558 		    __func__, sc->sc_phytype, reg);
   10559 		reg &= MII_ADDRMASK;
   10560 	}
   10561 
   10562 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10563 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10564 
   10565 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10566 		delay(50);
   10567 		mdic = CSR_READ(sc, WMREG_MDIC);
   10568 		if (mdic & MDIC_READY)
   10569 			break;
   10570 	}
   10571 
   10572 	if ((mdic & MDIC_READY) == 0) {
   10573 		DPRINTF(WM_DEBUG_GMII,
   10574 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10575 			device_xname(dev), phy, reg));
   10576 		return ETIMEDOUT;
   10577 	} else if (mdic & MDIC_E) {
   10578 		DPRINTF(WM_DEBUG_GMII,
   10579 		    ("%s: MDIC write error: phy %d reg %d\n",
   10580 			device_xname(dev), phy, reg));
   10581 		return -1;
   10582 	}
   10583 
   10584 	/*
   10585 	 * Allow some time after each MDIC transaction to avoid
   10586 	 * reading duplicate data in the next MDIC transaction.
   10587 	 */
   10588 	if (sc->sc_type == WM_T_PCH2)
   10589 		delay(100);
   10590 
   10591 	return 0;
   10592 }
   10593 
   10594 /*
   10595  * wm_gmii_i82544_readreg:	[mii interface function]
   10596  *
   10597  *	Read a PHY register on the GMII.
   10598  */
   10599 static int
   10600 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10601 {
   10602 	struct wm_softc *sc = device_private(dev);
   10603 	int rv;
   10604 
   10605 	if (sc->phy.acquire(sc)) {
   10606 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10607 		return -1;
   10608 	}
   10609 
   10610 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10611 
   10612 	sc->phy.release(sc);
   10613 
   10614 	return rv;
   10615 }
   10616 
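/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Locked variant of wm_gmii_i82544_readreg(). The caller must hold
 *	the PHY semaphore.
 */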
   10617 static int
   10618 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10619 {
   10620 	struct wm_softc *sc = device_private(dev);
   10621 	int rv;
   10622 
   10623 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10624 		switch (sc->sc_phytype) {
   10625 		case WMPHY_IGP:
   10626 		case WMPHY_IGP_2:
   10627 		case WMPHY_IGP_3:
   10628 			rv = wm_gmii_mdic_writereg(dev, phy,
   10629 			    MII_IGPHY_PAGE_SELECT, reg);
   10630 			if (rv != 0)
   10631 				return rv;
   10632 			break;
   10633 		default:
   10634 #ifdef WM_DEBUG
   10635 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10636 			    __func__, sc->sc_phytype, reg);
   10637 #endif
   10638 			break;
   10639 		}
   10640 	}
   10641 
   10642 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10643 }
   10644 
   10645 /*
   10646  * wm_gmii_i82544_writereg:	[mii interface function]
   10647  *
   10648  *	Write a PHY register on the GMII.
   10649  */
   10650 static int
   10651 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10652 {
   10653 	struct wm_softc *sc = device_private(dev);
   10654 	int rv;
   10655 
   10656 	if (sc->phy.acquire(sc)) {
   10657 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10658 		return -1;
   10659 	}
   10660 
   10661 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10662 	sc->phy.release(sc);
   10663 
   10664 	return rv;
   10665 }
   10666 
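/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	Locked variant of wm_gmii_i82544_writereg(). The caller must hold
 *	the PHY semaphore.
 */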
   10667 static int
   10668 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10669 {
   10670 	struct wm_softc *sc = device_private(dev);
   10671 	int rv;
   10672 
   10673 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10674 		switch (sc->sc_phytype) {
   10675 		case WMPHY_IGP:
   10676 		case WMPHY_IGP_2:
   10677 		case WMPHY_IGP_3:
   10678 			rv = wm_gmii_mdic_writereg(dev, phy,
   10679 			    MII_IGPHY_PAGE_SELECT, reg);
   10680 			if (rv != 0)
   10681 				return rv;
   10682 			break;
   10683 		default:
   10684 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10686 			    __func__, sc->sc_phytype, reg);
   10687 #endif
   10688 			break;
   10689 		}
   10690 	}
   10691 
   10692 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10693 }
   10694 
   10695 /*
   10696  * wm_gmii_i80003_readreg:	[mii interface function]
   10697  *
 *	Read a PHY register on the Kumeran interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10701  */
   10702 static int
   10703 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10704 {
   10705 	struct wm_softc *sc = device_private(dev);
   10706 	int page_select;
   10707 	uint16_t temp, temp2;
   10708 	int rv = 0;
   10709 
   10710 	if (phy != 1) /* Only one PHY on kumeran bus */
   10711 		return -1;
   10712 
   10713 	if (sc->phy.acquire(sc)) {
   10714 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10715 		return -1;
   10716 	}
   10717 
   10718 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10719 		page_select = GG82563_PHY_PAGE_SELECT;
   10720 	else {
   10721 		/*
   10722 		 * Use Alternative Page Select register to access registers
   10723 		 * 30 and 31.
   10724 		 */
   10725 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10726 	}
   10727 	temp = reg >> GG82563_PAGE_SHIFT;
   10728 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10729 		goto out;
   10730 
   10731 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10732 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
   10735 		 */
   10736 		delay(200);
   10737 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10738 		if ((rv != 0) || (temp2 != temp)) {
   10739 			device_printf(dev, "%s failed\n", __func__);
   10740 			rv = -1;
   10741 			goto out;
   10742 		}
   10743 		delay(200);
   10744 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10745 		delay(200);
   10746 	} else
   10747 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10748 
   10749 out:
   10750 	sc->phy.release(sc);
   10751 	return rv;
   10752 }
   10753 
   10754 /*
   10755  * wm_gmii_i80003_writereg:	[mii interface function]
   10756  *
 *	Write a PHY register on the Kumeran interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10760  */
   10761 static int
   10762 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10763 {
   10764 	struct wm_softc *sc = device_private(dev);
   10765 	int page_select, rv;
   10766 	uint16_t temp, temp2;
   10767 
   10768 	if (phy != 1) /* Only one PHY on kumeran bus */
   10769 		return -1;
   10770 
   10771 	if (sc->phy.acquire(sc)) {
   10772 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10773 		return -1;
   10774 	}
   10775 
   10776 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10777 		page_select = GG82563_PHY_PAGE_SELECT;
   10778 	else {
   10779 		/*
   10780 		 * Use Alternative Page Select register to access registers
   10781 		 * 30 and 31.
   10782 		 */
   10783 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10784 	}
   10785 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10786 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10787 		goto out;
   10788 
   10789 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10790 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
   10793 		 */
   10794 		delay(200);
   10795 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10796 		if ((rv != 0) || (temp2 != temp)) {
   10797 			device_printf(dev, "%s failed\n", __func__);
   10798 			rv = -1;
   10799 			goto out;
   10800 		}
   10801 		delay(200);
   10802 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10803 		delay(200);
   10804 	} else
   10805 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10806 
   10807 out:
   10808 	sc->phy.release(sc);
   10809 	return rv;
   10810 }
   10811 
   10812 /*
   10813  * wm_gmii_bm_readreg:	[mii interface function]
   10814  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10818  */
   10819 static int
   10820 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10821 {
   10822 	struct wm_softc *sc = device_private(dev);
   10823 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10824 	int rv;
   10825 
   10826 	if (sc->phy.acquire(sc)) {
   10827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10828 		return -1;
   10829 	}
   10830 
   10831 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10832 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10833 		    || (reg == 31)) ? 1 : phy;
   10834 	/* Page 800 works differently than the rest so it has its own func */
   10835 	if (page == BM_WUC_PAGE) {
   10836 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10837 		goto release;
   10838 	}
   10839 
   10840 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10841 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10842 		    && (sc->sc_type != WM_T_82583))
   10843 			rv = wm_gmii_mdic_writereg(dev, phy,
   10844 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10845 		else
   10846 			rv = wm_gmii_mdic_writereg(dev, phy,
   10847 			    BME1000_PHY_PAGE_SELECT, page);
   10848 		if (rv != 0)
   10849 			goto release;
   10850 	}
   10851 
   10852 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10853 
   10854 release:
   10855 	sc->phy.release(sc);
   10856 	return rv;
   10857 }
   10858 
   10859 /*
   10860  * wm_gmii_bm_writereg:	[mii interface function]
   10861  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10865  */
   10866 static int
   10867 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10868 {
   10869 	struct wm_softc *sc = device_private(dev);
   10870 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10871 	int rv;
   10872 
   10873 	if (sc->phy.acquire(sc)) {
   10874 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10875 		return -1;
   10876 	}
   10877 
   10878 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10879 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10880 		    || (reg == 31)) ? 1 : phy;
   10881 	/* Page 800 works differently than the rest so it has its own func */
   10882 	if (page == BM_WUC_PAGE) {
   10883 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10884 		goto release;
   10885 	}
   10886 
   10887 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10888 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10889 		    && (sc->sc_type != WM_T_82583))
   10890 			rv = wm_gmii_mdic_writereg(dev, phy,
   10891 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10892 		else
   10893 			rv = wm_gmii_mdic_writereg(dev, phy,
   10894 			    BME1000_PHY_PAGE_SELECT, page);
   10895 		if (rv != 0)
   10896 			goto release;
   10897 	}
   10898 
   10899 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10900 
   10901 release:
   10902 	sc->phy.release(sc);
   10903 	return rv;
   10904 }
   10905 
   10906 /*
   10907  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10908  *  @dev: pointer to the HW structure
   10909  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10910  *
   10911  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10912  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10913  */
   10914 static int
   10915 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10916 {
   10917 	uint16_t temp;
   10918 	int rv;
   10919 
   10920 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10921 		device_xname(dev), __func__));
   10922 
   10923 	if (!phy_regp)
   10924 		return -1;
   10925 
   10926 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10927 
   10928 	/* Select Port Control Registers page */
   10929 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10930 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10931 	if (rv != 0)
   10932 		return rv;
   10933 
   10934 	/* Read WUCE and save it */
   10935 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10936 	if (rv != 0)
   10937 		return rv;
   10938 
   10939 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10940 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10941 	 */
   10942 	temp = *phy_regp;
   10943 	temp |= BM_WUC_ENABLE_BIT;
   10944 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10945 
   10946 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10947 		return rv;
   10948 
   10949 	/* Select Host Wakeup Registers page - caller now able to write
   10950 	 * registers on the Wakeup registers page
   10951 	 */
   10952 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10953 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10954 }
   10955 
   10956 /*
   10957  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10958  *  @dev: pointer to the HW structure
   10959  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10960  *
   10961  *  Restore BM_WUC_ENABLE_REG to its original value.
   10962  *
   10963  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10964  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10965  *  caller.
   10966  */
   10967 static int
   10968 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10969 {
   10970 
   10971 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10972 		device_xname(dev), __func__));
   10973 
   10974 	if (!phy_regp)
   10975 		return -1;
   10976 
   10977 	/* Select Port Control Registers page */
   10978 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10979 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10980 
   10981 	/* Restore 769.17 to its original value */
   10982 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10983 
   10984 	return 0;
   10985 }
   10986 
   10987 /*
   10988  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10989  *  @sc: pointer to the HW structure
   10990  *  @offset: register offset to be read or written
   10991  *  @val: pointer to the data to read or write
   10992  *  @rd: determines if operation is read or write
   10993  *  @page_set: BM_WUC_PAGE already set and access enabled
   10994  *
   10995  *  Read the PHY register at offset and store the retrieved information in
   10996  *  data, or write data to PHY register at offset.  Note the procedure to
   10997  *  access the PHY wakeup registers is different than reading the other PHY
   10998  *  registers. It works as such:
   10999  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11000  *  2) Set page to 800 for host (801 if we were manageability)
   11001  *  3) Write the address using the address opcode (0x11)
   11002  *  4) Read or write the data using the data opcode (0x12)
   11003  *  5) Restore 769.17.2 to its original value
   11004  *
   11005  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11006  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11007  *
   11008  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11009  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11011  */
   11012 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11014 	bool page_set)
   11015 {
   11016 	struct wm_softc *sc = device_private(dev);
   11017 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11018 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11019 	uint16_t wuce;
   11020 	int rv = 0;
   11021 
   11022 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11023 		device_xname(dev), __func__));
   11024 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11025 	if ((sc->sc_type == WM_T_PCH)
   11026 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11027 		device_printf(dev,
   11028 		    "Attempting to access page %d while gig enabled.\n", page);
   11029 	}
   11030 
   11031 	if (!page_set) {
   11032 		/* Enable access to PHY wakeup registers */
   11033 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11034 		if (rv != 0) {
   11035 			device_printf(dev,
   11036 			    "%s: Could not enable PHY wakeup reg access\n",
   11037 			    __func__);
   11038 			return rv;
   11039 		}
   11040 	}
   11041 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11042 		device_xname(sc->sc_dev), __func__, page, regnum));
   11043 
	/*
	 * Steps 3 and 4: access the PHY wakeup register, as described in
	 * the comment above this function.
	 */
   11048 
   11049 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11050 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11051 	if (rv != 0)
   11052 		return rv;
   11053 
   11054 	if (rd) {
   11055 		/* Read the Wakeup register page value using opcode 0x12 */
   11056 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11057 	} else {
   11058 		/* Write the Wakeup register page value using opcode 0x12 */
   11059 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11060 	}
   11061 	if (rv != 0)
   11062 		return rv;
   11063 
   11064 	if (!page_set)
   11065 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11066 
   11067 	return rv;
   11068 }
   11069 
   11070 /*
   11071  * wm_gmii_hv_readreg:	[mii interface function]
   11072  *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11076  */
   11077 static int
   11078 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11079 {
   11080 	struct wm_softc *sc = device_private(dev);
   11081 	int rv;
   11082 
   11083 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11084 		device_xname(dev), __func__));
   11085 	if (sc->phy.acquire(sc)) {
   11086 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11087 		return -1;
   11088 	}
   11089 
   11090 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11091 	sc->phy.release(sc);
   11092 	return rv;
   11093 }
   11094 
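/*
 * wm_gmii_hv_readreg_locked:
 *
 *	Locked variant of wm_gmii_hv_readreg(). The caller must hold the
 *	PHY semaphore.
 */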
   11095 static int
   11096 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11097 {
   11098 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11099 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11100 	int rv;
   11101 
   11102 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11103 
   11104 	/* Page 800 works differently than the rest so it has its own func */
   11105 	if (page == BM_WUC_PAGE)
   11106 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11107 
   11108 	/*
	 * Pages lower than 768 work differently than the rest, so they have
	 * their own function.
   11111 	 */
   11112 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: page %d is not supported\n",
		    __func__, page);
   11114 		return -1;
   11115 	}
   11116 
   11117 	/*
   11118 	 * XXX I21[789] documents say that the SMBus Address register is at
   11119 	 * PHY address 01, Page 0 (not 768), Register 26.
   11120 	 */
   11121 	if (page == HV_INTC_FC_PAGE_START)
   11122 		page = 0;
   11123 
   11124 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11125 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11126 		    page << BME1000_PAGE_SHIFT);
   11127 		if (rv != 0)
   11128 			return rv;
   11129 	}
   11130 
   11131 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11132 }
   11133 
   11134 /*
   11135  * wm_gmii_hv_writereg:	[mii interface function]
   11136  *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11140  */
   11141 static int
   11142 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11143 {
   11144 	struct wm_softc *sc = device_private(dev);
   11145 	int rv;
   11146 
   11147 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11148 		device_xname(dev), __func__));
   11149 
   11150 	if (sc->phy.acquire(sc)) {
   11151 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11152 		return -1;
   11153 	}
   11154 
   11155 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11156 	sc->phy.release(sc);
   11157 
   11158 	return rv;
   11159 }
   11160 
   11161 static int
   11162 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11163 {
   11164 	struct wm_softc *sc = device_private(dev);
   11165 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11166 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11167 	int rv;
   11168 
   11169 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11170 
   11171 	/* Page 800 works differently than the rest so it has its own func */
   11172 	if (page == BM_WUC_PAGE)
   11173 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11174 		    false);
   11175 
   11176 	/*
    11177 	 * Pages 1 through 767 need a different access method, which this
    11178 	 * driver does not implement.
   11179 	 */
   11180 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11181 		device_printf(dev, "%s: page %d not supported\n",
          		    __func__, page);
   11182 		return -1;
   11183 	}
   11184 
   11185 	{
   11186 		/*
   11187 		 * XXX I21[789] documents say that the SMBus Address register
   11188 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11189 		 */
   11190 		if (page == HV_INTC_FC_PAGE_START)
   11191 			page = 0;
   11192 
   11193 		/*
   11194 		 * XXX Workaround MDIO accesses being disabled after entering
   11195 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11196 		 * register is set)
   11197 		 */
   11198 		if (sc->sc_phytype == WMPHY_82578) {
   11199 			struct mii_softc *child;
   11200 
   11201 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11202 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11203 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11204 			    && ((val & (1 << 11)) != 0)) {
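          				/*
          				 * XXX Not implemented: other drivers
          				 * (e.g. Linux e1000e) reportedly work
          				 * around this by writing 0x7EFF to a
          				 * PHY debug register before the
          				 * power-down write; here we only warn.
          				 */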
   11205 				printf("XXX need workaround\n");
   11206 			}
   11207 		}
   11208 
   11209 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11210 			rv = wm_gmii_mdic_writereg(dev, 1,
   11211 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11212 			if (rv != 0)
   11213 				return rv;
   11214 		}
   11215 	}
   11216 
   11217 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11218 }
   11219 
   11220 /*
   11221  * wm_gmii_82580_readreg:	[mii interface function]
   11222  *
   11223  *	Read a PHY register on the 82580 and I350.
   11224  * This could be handled by the PHY layer if we didn't have to lock the
    11225  * resource ...
   11226  */
   11227 static int
   11228 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11229 {
   11230 	struct wm_softc *sc = device_private(dev);
   11231 	int rv;
   11232 
   11233 	if (sc->phy.acquire(sc) != 0) {
   11234 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11235 		return -1;
   11236 	}
   11237 
   11238 #ifdef DIAGNOSTIC
   11239 	if (reg > MII_ADDRMASK) {
   11240 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11241 		    __func__, sc->sc_phytype, reg);
   11242 		reg &= MII_ADDRMASK;
   11243 	}
   11244 #endif
   11245 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11246 
   11247 	sc->phy.release(sc);
   11248 	return rv;
   11249 }
   11250 
   11251 /*
   11252  * wm_gmii_82580_writereg:	[mii interface function]
   11253  *
   11254  *	Write a PHY register on the 82580 and I350.
   11255  * This could be handled by the PHY layer if we didn't have to lock the
    11256  * resource ...
   11257  */
   11258 static int
   11259 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11260 {
   11261 	struct wm_softc *sc = device_private(dev);
   11262 	int rv;
   11263 
   11264 	if (sc->phy.acquire(sc) != 0) {
   11265 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11266 		return -1;
   11267 	}
   11268 
   11269 #ifdef DIAGNOSTIC
   11270 	if (reg > MII_ADDRMASK) {
   11271 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11272 		    __func__, sc->sc_phytype, reg);
   11273 		reg &= MII_ADDRMASK;
   11274 	}
   11275 #endif
   11276 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11277 
   11278 	sc->phy.release(sc);
   11279 	return rv;
   11280 }
   11281 
   11282 /*
   11283  * wm_gmii_gs40g_readreg:	[mii interface function]
   11284  *
    11285  *	Read a PHY register on the I210 and I211.
    11286  * This could be handled by the PHY layer if we didn't have to lock the
    11287  * resource ...
   11288  */
   11289 static int
   11290 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11291 {
   11292 	struct wm_softc *sc = device_private(dev);
   11293 	int page, offset;
   11294 	int rv;
   11295 
   11296 	/* Acquire semaphore */
   11297 	if (sc->phy.acquire(sc)) {
   11298 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11299 		return -1;
   11300 	}
   11301 
   11302 	/* Page select */
   11303 	page = reg >> GS40G_PAGE_SHIFT;
   11304 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11305 	if (rv != 0)
   11306 		goto release;
   11307 
   11308 	/* Read reg */
   11309 	offset = reg & GS40G_OFFSET_MASK;
   11310 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11311 
   11312 release:
   11313 	sc->phy.release(sc);
   11314 	return rv;
   11315 }
   11316 
   11317 /*
   11318  * wm_gmii_gs40g_writereg:	[mii interface function]
   11319  *
   11320  *	Write a PHY register on the I210 and I211.
   11321  * This could be handled by the PHY layer if we didn't have to lock the
    11322  * resource ...
   11323  */
   11324 static int
   11325 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11326 {
   11327 	struct wm_softc *sc = device_private(dev);
   11328 	uint16_t page;
   11329 	int offset, rv;
   11330 
   11331 	/* Acquire semaphore */
   11332 	if (sc->phy.acquire(sc)) {
   11333 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11334 		return -1;
   11335 	}
   11336 
   11337 	/* Page select */
   11338 	page = reg >> GS40G_PAGE_SHIFT;
   11339 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11340 	if (rv != 0)
   11341 		goto release;
   11342 
   11343 	/* Write reg */
   11344 	offset = reg & GS40G_OFFSET_MASK;
   11345 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11346 
   11347 release:
   11348 	/* Release semaphore */
   11349 	sc->phy.release(sc);
   11350 	return rv;
   11351 }
   11352 
   11353 /*
   11354  * wm_gmii_statchg:	[mii interface function]
   11355  *
   11356  *	Callback from MII layer when media changes.
   11357  */
   11358 static void
   11359 wm_gmii_statchg(struct ifnet *ifp)
   11360 {
   11361 	struct wm_softc *sc = ifp->if_softc;
   11362 	struct mii_data *mii = &sc->sc_mii;
   11363 
   11364 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11365 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11366 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11367 
   11368 	/* Get flow control negotiation result. */
   11369 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11370 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11371 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11372 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11373 	}
   11374 
   11375 	if (sc->sc_flowflags & IFM_FLOW) {
   11376 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11377 			sc->sc_ctrl |= CTRL_TFCE;
   11378 			sc->sc_fcrtl |= FCRTL_XONE;
   11379 		}
   11380 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11381 			sc->sc_ctrl |= CTRL_RFCE;
   11382 	}
   11383 
   11384 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11385 		DPRINTF(WM_DEBUG_LINK,
   11386 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11387 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11388 	} else {
   11389 		DPRINTF(WM_DEBUG_LINK,
   11390 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11391 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11392 	}
   11393 
   11394 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11395 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11396 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11397 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11398 	if (sc->sc_type == WM_T_80003) {
   11399 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11400 		case IFM_1000_T:
   11401 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11402 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11403 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11404 			break;
   11405 		default:
   11406 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11407 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11408 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11409 			break;
   11410 		}
   11411 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11412 	}
   11413 }
   11414 
   11415 /* kumeran related (80003, ICH* and PCH*) */
   11416 
   11417 /*
   11418  * wm_kmrn_readreg:
   11419  *
   11420  *	Read a kumeran register
   11421  */
   11422 static int
   11423 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11424 {
   11425 	int rv;
   11426 
   11427 	if (sc->sc_type == WM_T_80003)
   11428 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11429 	else
   11430 		rv = sc->phy.acquire(sc);
   11431 	if (rv != 0) {
   11432 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11433 		    __func__);
   11434 		return rv;
   11435 	}
   11436 
   11437 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11438 
   11439 	if (sc->sc_type == WM_T_80003)
   11440 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11441 	else
   11442 		sc->phy.release(sc);
   11443 
   11444 	return rv;
   11445 }
   11446 
   11447 static int
   11448 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11449 {
   11450 
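          	/*
          	 * Kumeran registers are accessed indirectly through
          	 * KUMCTRLSTA: write the register offset with the read-enable
          	 * bit (REN) set, give the hardware a moment to latch the
          	 * data, then read it back from the same register.
          	 */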
   11451 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11452 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11453 	    KUMCTRLSTA_REN);
   11454 	CSR_WRITE_FLUSH(sc);
   11455 	delay(2);
   11456 
   11457 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11458 
   11459 	return 0;
   11460 }
   11461 
   11462 /*
   11463  * wm_kmrn_writereg:
   11464  *
   11465  *	Write a kumeran register
   11466  */
   11467 static int
   11468 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11469 {
   11470 	int rv;
   11471 
   11472 	if (sc->sc_type == WM_T_80003)
   11473 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11474 	else
   11475 		rv = sc->phy.acquire(sc);
   11476 	if (rv != 0) {
   11477 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11478 		    __func__);
   11479 		return rv;
   11480 	}
   11481 
   11482 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11483 
   11484 	if (sc->sc_type == WM_T_80003)
   11485 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11486 	else
   11487 		sc->phy.release(sc);
   11488 
   11489 	return rv;
   11490 }
   11491 
   11492 static int
   11493 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11494 {
   11495 
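          	/* For writes, the data rides in the low bits and REN stays clear */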
   11496 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11497 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11498 
   11499 	return 0;
   11500 }
   11501 
   11502 /*
   11503  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11504  * This access method is different from IEEE MMD.
   11505  */
   11506 static int
   11507 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11508 {
   11509 	struct wm_softc *sc = device_private(dev);
   11510 	int rv;
   11511 
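          	/* Select the EMI register by its address, then access the data */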
   11512 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11513 	if (rv != 0)
   11514 		return rv;
   11515 
   11516 	if (rd)
   11517 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11518 	else
   11519 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11520 	return rv;
   11521 }
   11522 
   11523 static int
   11524 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11525 {
   11526 
   11527 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11528 }
   11529 
   11530 static int
   11531 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11532 {
   11533 
   11534 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11535 }
   11536 
   11537 /* SGMII related */
   11538 
   11539 /*
   11540  * wm_sgmii_uses_mdio
   11541  *
   11542  * Check whether the transaction is to the internal PHY or the external
   11543  * MDIO interface. Return true if it's MDIO.
   11544  */
   11545 static bool
   11546 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11547 {
   11548 	uint32_t reg;
   11549 	bool ismdio = false;
   11550 
   11551 	switch (sc->sc_type) {
   11552 	case WM_T_82575:
   11553 	case WM_T_82576:
   11554 		reg = CSR_READ(sc, WMREG_MDIC);
   11555 		ismdio = ((reg & MDIC_DEST) != 0);
   11556 		break;
   11557 	case WM_T_82580:
   11558 	case WM_T_I350:
   11559 	case WM_T_I354:
   11560 	case WM_T_I210:
   11561 	case WM_T_I211:
   11562 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11563 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11564 		break;
   11565 	default:
   11566 		break;
   11567 	}
   11568 
   11569 	return ismdio;
   11570 }
   11571 
   11572 /*
   11573  * wm_sgmii_readreg:	[mii interface function]
   11574  *
   11575  *	Read a PHY register on the SGMII
   11576  * This could be handled by the PHY layer if we didn't have to lock the
    11577  * resource ...
   11578  */
   11579 static int
   11580 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11581 {
   11582 	struct wm_softc *sc = device_private(dev);
   11583 	int rv;
   11584 
   11585 	if (sc->phy.acquire(sc)) {
   11586 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11587 		return -1;
   11588 	}
   11589 
   11590 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11591 
   11592 	sc->phy.release(sc);
   11593 	return rv;
   11594 }
   11595 
   11596 static int
   11597 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11598 {
   11599 	struct wm_softc *sc = device_private(dev);
   11600 	uint32_t i2ccmd;
    11601 	int i, rv = 0;
   11602 
   11603 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11604 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11605 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11606 
   11607 	/* Poll the ready bit */
   11608 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11609 		delay(50);
   11610 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11611 		if (i2ccmd & I2CCMD_READY)
   11612 			break;
   11613 	}
   11614 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11615 		device_printf(dev, "I2CCMD Read did not complete\n");
   11616 		rv = ETIMEDOUT;
   11617 	}
   11618 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11619 		device_printf(dev, "I2CCMD Error bit set\n");
   11620 		rv = EIO;
   11621 	}
   11622 
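          	/* Swap the data bytes returned by the I2C interface */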
   11623 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11624 
   11625 	return rv;
   11626 }
   11627 
   11628 /*
   11629  * wm_sgmii_writereg:	[mii interface function]
   11630  *
   11631  *	Write a PHY register on the SGMII.
   11632  * This could be handled by the PHY layer if we didn't have to lock the
    11633  * resource ...
   11634  */
   11635 static int
   11636 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11637 {
   11638 	struct wm_softc *sc = device_private(dev);
   11639 	int rv;
   11640 
   11641 	if (sc->phy.acquire(sc) != 0) {
   11642 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11643 		return -1;
   11644 	}
   11645 
   11646 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11647 
   11648 	sc->phy.release(sc);
   11649 
   11650 	return rv;
   11651 }
   11652 
   11653 static int
   11654 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11655 {
   11656 	struct wm_softc *sc = device_private(dev);
   11657 	uint32_t i2ccmd;
   11658 	uint16_t swapdata;
   11659 	int rv = 0;
   11660 	int i;
   11661 
   11662 	/* Swap the data bytes for the I2C interface */
   11663 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11664 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11665 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11666 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11667 
   11668 	/* Poll the ready bit */
   11669 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11670 		delay(50);
   11671 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11672 		if (i2ccmd & I2CCMD_READY)
   11673 			break;
   11674 	}
   11675 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11676 		device_printf(dev, "I2CCMD Write did not complete\n");
   11677 		rv = ETIMEDOUT;
   11678 	}
   11679 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11680 		device_printf(dev, "I2CCMD Error bit set\n");
   11681 		rv = EIO;
   11682 	}
   11683 
   11684 	return rv;
   11685 }
   11686 
   11687 /* TBI related */
   11688 
   11689 static bool
   11690 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11691 {
   11692 	bool sig;
   11693 
   11694 	sig = ctrl & CTRL_SWDPIN(1);
   11695 
   11696 	/*
   11697 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11698 	 * detect a signal, 1 if they don't.
   11699 	 */
   11700 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11701 		sig = !sig;
   11702 
   11703 	return sig;
   11704 }
   11705 
   11706 /*
   11707  * wm_tbi_mediainit:
   11708  *
   11709  *	Initialize media for use on 1000BASE-X devices.
   11710  */
   11711 static void
   11712 wm_tbi_mediainit(struct wm_softc *sc)
   11713 {
   11714 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11715 	const char *sep = "";
   11716 
   11717 	if (sc->sc_type < WM_T_82543)
   11718 		sc->sc_tipg = TIPG_WM_DFLT;
   11719 	else
   11720 		sc->sc_tipg = TIPG_LG_DFLT;
   11721 
   11722 	sc->sc_tbi_serdes_anegticks = 5;
   11723 
   11724 	/* Initialize our media structures */
   11725 	sc->sc_mii.mii_ifp = ifp;
   11726 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11727 
   11728 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11729 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11730 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11731 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11732 	else
   11733 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11734 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11735 
   11736 	/*
   11737 	 * SWD Pins:
   11738 	 *
   11739 	 *	0 = Link LED (output)
   11740 	 *	1 = Loss Of Signal (input)
   11741 	 */
   11742 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11743 
   11744 	/* XXX Perhaps this is only for TBI */
   11745 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11746 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11747 
   11748 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11749 		sc->sc_ctrl &= ~CTRL_LRST;
   11750 
   11751 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11752 
   11753 #define	ADD(ss, mm, dd)							\
   11754 do {									\
   11755 	aprint_normal("%s%s", sep, ss);					\
   11756 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11757 	sep = ", ";							\
   11758 } while (/*CONSTCOND*/0)
   11759 
   11760 	aprint_normal_dev(sc->sc_dev, "");
   11761 
   11762 	if (sc->sc_type == WM_T_I354) {
   11763 		uint32_t status;
   11764 
   11765 		status = CSR_READ(sc, WMREG_STATUS);
   11766 		if (((status & STATUS_2P5_SKU) != 0)
   11767 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11768 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11769 		} else
   11770 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11771 	} else if (sc->sc_type == WM_T_82545) {
   11772 		/* Only 82545 is LX (XXX except SFP) */
   11773 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11774 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11775 	} else {
   11776 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11777 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11778 	}
   11779 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11780 	aprint_normal("\n");
   11781 
   11782 #undef ADD
   11783 
   11784 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11785 }
   11786 
   11787 /*
   11788  * wm_tbi_mediachange:	[ifmedia interface function]
   11789  *
   11790  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11791  */
   11792 static int
   11793 wm_tbi_mediachange(struct ifnet *ifp)
   11794 {
   11795 	struct wm_softc *sc = ifp->if_softc;
   11796 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11797 	uint32_t status, ctrl;
   11798 	bool signal;
   11799 	int i;
   11800 
   11801 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11802 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11803 		/* XXX need some work for >= 82571 and < 82575 */
   11804 		if (sc->sc_type < WM_T_82575)
   11805 			return 0;
   11806 	}
   11807 
   11808 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11809 	    || (sc->sc_type >= WM_T_82575))
   11810 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11811 
   11812 	sc->sc_ctrl &= ~CTRL_LRST;
   11813 	sc->sc_txcw = TXCW_ANE;
   11814 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11815 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11816 	else if (ife->ifm_media & IFM_FDX)
   11817 		sc->sc_txcw |= TXCW_FD;
   11818 	else
   11819 		sc->sc_txcw |= TXCW_HD;
   11820 
   11821 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11822 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11823 
    11824 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11825 		device_xname(sc->sc_dev), sc->sc_txcw));
   11826 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11827 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11828 	CSR_WRITE_FLUSH(sc);
   11829 	delay(1000);
   11830 
    11831 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11832 	signal = wm_tbi_havesignal(sc, ctrl);
   11833 
   11834 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11835 		signal));
   11836 
   11837 	if (signal) {
   11838 		/* Have signal; wait for the link to come up. */
   11839 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11840 			delay(10000);
   11841 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11842 				break;
   11843 		}
   11844 
    11845 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11846 			device_xname(sc->sc_dev), i));
   11847 
   11848 		status = CSR_READ(sc, WMREG_STATUS);
   11849 		DPRINTF(WM_DEBUG_LINK,
   11850 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11851 			device_xname(sc->sc_dev), status, STATUS_LU));
   11852 		if (status & STATUS_LU) {
   11853 			/* Link is up. */
   11854 			DPRINTF(WM_DEBUG_LINK,
   11855 			    ("%s: LINK: set media -> link up %s\n",
   11856 				device_xname(sc->sc_dev),
   11857 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11858 
   11859 			/*
   11860 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11861 			 * so we should update sc->sc_ctrl
   11862 			 */
   11863 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11864 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11865 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11866 			if (status & STATUS_FD)
   11867 				sc->sc_tctl |=
   11868 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11869 			else
   11870 				sc->sc_tctl |=
   11871 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11872 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11873 				sc->sc_fcrtl |= FCRTL_XONE;
   11874 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11875 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11876 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11877 			sc->sc_tbi_linkup = 1;
   11878 		} else {
   11879 			if (i == WM_LINKUP_TIMEOUT)
   11880 				wm_check_for_link(sc);
   11881 			/* Link is down. */
   11882 			DPRINTF(WM_DEBUG_LINK,
   11883 			    ("%s: LINK: set media -> link down\n",
   11884 				device_xname(sc->sc_dev)));
   11885 			sc->sc_tbi_linkup = 0;
   11886 		}
   11887 	} else {
   11888 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11889 			device_xname(sc->sc_dev)));
   11890 		sc->sc_tbi_linkup = 0;
   11891 	}
   11892 
   11893 	wm_tbi_serdes_set_linkled(sc);
   11894 
   11895 	return 0;
   11896 }
   11897 
   11898 /*
   11899  * wm_tbi_mediastatus:	[ifmedia interface function]
   11900  *
   11901  *	Get the current interface media status on a 1000BASE-X device.
   11902  */
   11903 static void
   11904 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11905 {
   11906 	struct wm_softc *sc = ifp->if_softc;
   11907 	uint32_t ctrl, status;
   11908 
   11909 	ifmr->ifm_status = IFM_AVALID;
   11910 	ifmr->ifm_active = IFM_ETHER;
   11911 
   11912 	status = CSR_READ(sc, WMREG_STATUS);
   11913 	if ((status & STATUS_LU) == 0) {
   11914 		ifmr->ifm_active |= IFM_NONE;
   11915 		return;
   11916 	}
   11917 
   11918 	ifmr->ifm_status |= IFM_ACTIVE;
   11919 	/* Only 82545 is LX */
   11920 	if (sc->sc_type == WM_T_82545)
   11921 		ifmr->ifm_active |= IFM_1000_LX;
   11922 	else
   11923 		ifmr->ifm_active |= IFM_1000_SX;
   11924 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11925 		ifmr->ifm_active |= IFM_FDX;
   11926 	else
   11927 		ifmr->ifm_active |= IFM_HDX;
   11928 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11929 	if (ctrl & CTRL_RFCE)
   11930 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11931 	if (ctrl & CTRL_TFCE)
   11932 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11933 }
   11934 
   11935 /* XXX TBI only */
   11936 static int
   11937 wm_check_for_link(struct wm_softc *sc)
   11938 {
   11939 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11940 	uint32_t rxcw;
   11941 	uint32_t ctrl;
   11942 	uint32_t status;
   11943 	bool signal;
   11944 
   11945 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11946 		device_xname(sc->sc_dev), __func__));
   11947 
   11948 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11949 		/* XXX need some work for >= 82571 */
   11950 		if (sc->sc_type >= WM_T_82571) {
   11951 			sc->sc_tbi_linkup = 1;
   11952 			return 0;
   11953 		}
   11954 	}
   11955 
   11956 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11957 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11958 	status = CSR_READ(sc, WMREG_STATUS);
   11959 	signal = wm_tbi_havesignal(sc, ctrl);
   11960 
   11961 	DPRINTF(WM_DEBUG_LINK,
   11962 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11963 		device_xname(sc->sc_dev), __func__, signal,
   11964 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11965 
   11966 	/*
   11967 	 * SWDPIN   LU RXCW
   11968 	 *	0    0	  0
   11969 	 *	0    0	  1	(should not happen)
   11970 	 *	0    1	  0	(should not happen)
   11971 	 *	0    1	  1	(should not happen)
   11972 	 *	1    0	  0	Disable autonego and force linkup
   11973 	 *	1    0	  1	got /C/ but not linkup yet
   11974 	 *	1    1	  0	(linkup)
   11975 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11976 	 *
   11977 	 */
   11978 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11979 		DPRINTF(WM_DEBUG_LINK,
   11980 		    ("%s: %s: force linkup and fullduplex\n",
   11981 			device_xname(sc->sc_dev), __func__));
   11982 		sc->sc_tbi_linkup = 0;
   11983 		/* Disable auto-negotiation in the TXCW register */
   11984 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11985 
   11986 		/*
   11987 		 * Force link-up and also force full-duplex.
   11988 		 *
    11989 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
    11990 		 * automatically, so refresh the cached sc->sc_ctrl from it.
   11991 		 */
   11992 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11994 	} else if (((status & STATUS_LU) != 0)
   11995 	    && ((rxcw & RXCW_C) != 0)
   11996 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11997 		sc->sc_tbi_linkup = 1;
   11998 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11999 			device_xname(sc->sc_dev),
   12000 			__func__));
   12001 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12002 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12003 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12004 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12005 			device_xname(sc->sc_dev), __func__));
   12006 	} else {
   12007 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12008 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12009 			status));
   12010 	}
   12011 
   12012 	return 0;
   12013 }
   12014 
   12015 /*
   12016  * wm_tbi_tick:
   12017  *
   12018  *	Check the link on TBI devices.
   12019  *	This function acts as mii_tick().
   12020  */
   12021 static void
   12022 wm_tbi_tick(struct wm_softc *sc)
   12023 {
   12024 	struct mii_data *mii = &sc->sc_mii;
   12025 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12026 	uint32_t status;
   12027 
   12028 	KASSERT(WM_CORE_LOCKED(sc));
   12029 
   12030 	status = CSR_READ(sc, WMREG_STATUS);
   12031 
   12032 	/* XXX is this needed? */
   12033 	(void)CSR_READ(sc, WMREG_RXCW);
   12034 	(void)CSR_READ(sc, WMREG_CTRL);
   12035 
   12036 	/* set link status */
   12037 	if ((status & STATUS_LU) == 0) {
   12038 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12039 			device_xname(sc->sc_dev)));
   12040 		sc->sc_tbi_linkup = 0;
   12041 	} else if (sc->sc_tbi_linkup == 0) {
   12042 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12043 			device_xname(sc->sc_dev),
   12044 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12045 		sc->sc_tbi_linkup = 1;
   12046 		sc->sc_tbi_serdes_ticks = 0;
   12047 	}
   12048 
   12049 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12050 		goto setled;
   12051 
   12052 	if ((status & STATUS_LU) == 0) {
   12053 		sc->sc_tbi_linkup = 0;
   12054 		/* If the timer expired, retry autonegotiation */
   12055 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12056 		    && (++sc->sc_tbi_serdes_ticks
   12057 			>= sc->sc_tbi_serdes_anegticks)) {
   12058 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12059 			sc->sc_tbi_serdes_ticks = 0;
   12060 			/*
   12061 			 * Reset the link, and let autonegotiation do
   12062 			 * its thing
   12063 			 */
   12064 			sc->sc_ctrl |= CTRL_LRST;
   12065 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12066 			CSR_WRITE_FLUSH(sc);
   12067 			delay(1000);
   12068 			sc->sc_ctrl &= ~CTRL_LRST;
   12069 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12070 			CSR_WRITE_FLUSH(sc);
   12071 			delay(1000);
   12072 			CSR_WRITE(sc, WMREG_TXCW,
   12073 			    sc->sc_txcw & ~TXCW_ANE);
   12074 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12075 		}
   12076 	}
   12077 
   12078 setled:
   12079 	wm_tbi_serdes_set_linkled(sc);
   12080 }
   12081 
   12082 /* SERDES related */
   12083 static void
   12084 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12085 {
   12086 	uint32_t reg;
   12087 
   12088 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12089 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12090 		return;
   12091 
   12092 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12093 	reg |= PCS_CFG_PCS_EN;
   12094 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12095 
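          	/* Clearing SDP3 powers up the laser (cf. other e1000 drivers) */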
   12096 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12097 	reg &= ~CTRL_EXT_SWDPIN(3);
   12098 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12099 	CSR_WRITE_FLUSH(sc);
   12100 }
   12101 
   12102 static int
   12103 wm_serdes_mediachange(struct ifnet *ifp)
   12104 {
   12105 	struct wm_softc *sc = ifp->if_softc;
   12106 	bool pcs_autoneg = true; /* XXX */
   12107 	uint32_t ctrl_ext, pcs_lctl, reg;
   12108 
   12109 	/* XXX Currently, this function is not called on 8257[12] */
   12110 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12111 	    || (sc->sc_type >= WM_T_82575))
   12112 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12113 
   12114 	wm_serdes_power_up_link_82575(sc);
   12115 
   12116 	sc->sc_ctrl |= CTRL_SLU;
   12117 
   12118 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12119 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12120 
   12121 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12122 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12123 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12124 	case CTRL_EXT_LINK_MODE_SGMII:
   12125 		pcs_autoneg = true;
   12126 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12127 		break;
   12128 	case CTRL_EXT_LINK_MODE_1000KX:
   12129 		pcs_autoneg = false;
   12130 		/* FALLTHROUGH */
   12131 	default:
   12132 		if ((sc->sc_type == WM_T_82575)
   12133 		    || (sc->sc_type == WM_T_82576)) {
   12134 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12135 				pcs_autoneg = false;
   12136 		}
   12137 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12138 		    | CTRL_FRCFDX;
   12139 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12140 	}
   12141 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12142 
   12143 	if (pcs_autoneg) {
   12144 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12145 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12146 
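          		/* Advertise both symmetric and asymmetric pause */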
   12147 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12148 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12149 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12150 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12151 	} else
   12152 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12153 
   12154 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12155 
   12157 	return 0;
   12158 }
   12159 
   12160 static void
   12161 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12162 {
   12163 	struct wm_softc *sc = ifp->if_softc;
   12164 	struct mii_data *mii = &sc->sc_mii;
   12165 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12166 	uint32_t pcs_adv, pcs_lpab, reg;
   12167 
   12168 	ifmr->ifm_status = IFM_AVALID;
   12169 	ifmr->ifm_active = IFM_ETHER;
   12170 
   12171 	/* Check PCS */
   12172 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12173 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12174 		ifmr->ifm_active |= IFM_NONE;
   12175 		sc->sc_tbi_linkup = 0;
   12176 		goto setled;
   12177 	}
   12178 
   12179 	sc->sc_tbi_linkup = 1;
   12180 	ifmr->ifm_status |= IFM_ACTIVE;
   12181 	if (sc->sc_type == WM_T_I354) {
   12182 		uint32_t status;
   12183 
   12184 		status = CSR_READ(sc, WMREG_STATUS);
   12185 		if (((status & STATUS_2P5_SKU) != 0)
   12186 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12187 			ifmr->ifm_active |= IFM_2500_KX;
   12188 		} else
   12189 			ifmr->ifm_active |= IFM_1000_KX;
   12190 	} else {
   12191 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12192 		case PCS_LSTS_SPEED_10:
   12193 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12194 			break;
   12195 		case PCS_LSTS_SPEED_100:
   12196 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12197 			break;
   12198 		case PCS_LSTS_SPEED_1000:
   12199 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12200 			break;
   12201 		default:
   12202 			device_printf(sc->sc_dev, "Unknown speed\n");
   12203 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12204 			break;
   12205 		}
   12206 	}
   12207 	if ((reg & PCS_LSTS_FDX) != 0)
   12208 		ifmr->ifm_active |= IFM_FDX;
   12209 	else
   12210 		ifmr->ifm_active |= IFM_HDX;
   12211 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12212 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12213 		/* Check flow */
   12214 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12215 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12216 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12217 			goto setled;
   12218 		}
   12219 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12220 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12221 		DPRINTF(WM_DEBUG_LINK,
   12222 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12223 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12224 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12225 			mii->mii_media_active |= IFM_FLOW
   12226 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12227 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12228 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12229 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12230 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12231 			mii->mii_media_active |= IFM_FLOW
   12232 			    | IFM_ETH_TXPAUSE;
   12233 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12234 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12235 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12236 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12237 			mii->mii_media_active |= IFM_FLOW
   12238 			    | IFM_ETH_RXPAUSE;
   12239 		}
   12240 	}
   12241 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12242 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12243 setled:
   12244 	wm_tbi_serdes_set_linkled(sc);
   12245 }
   12246 
   12247 /*
   12248  * wm_serdes_tick:
   12249  *
   12250  *	Check the link on serdes devices.
   12251  */
   12252 static void
   12253 wm_serdes_tick(struct wm_softc *sc)
   12254 {
   12255 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12256 	struct mii_data *mii = &sc->sc_mii;
   12257 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12258 	uint32_t reg;
   12259 
   12260 	KASSERT(WM_CORE_LOCKED(sc));
   12261 
   12262 	mii->mii_media_status = IFM_AVALID;
   12263 	mii->mii_media_active = IFM_ETHER;
   12264 
   12265 	/* Check PCS */
   12266 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12267 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12268 		mii->mii_media_status |= IFM_ACTIVE;
   12269 		sc->sc_tbi_linkup = 1;
   12270 		sc->sc_tbi_serdes_ticks = 0;
   12271 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12272 		if ((reg & PCS_LSTS_FDX) != 0)
   12273 			mii->mii_media_active |= IFM_FDX;
   12274 		else
   12275 			mii->mii_media_active |= IFM_HDX;
   12276 	} else {
    12277 		mii->mii_media_active |= IFM_NONE;
   12278 		sc->sc_tbi_linkup = 0;
   12279 		/* If the timer expired, retry autonegotiation */
   12280 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12281 		    && (++sc->sc_tbi_serdes_ticks
   12282 			>= sc->sc_tbi_serdes_anegticks)) {
   12283 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12284 			sc->sc_tbi_serdes_ticks = 0;
   12285 			/* XXX */
   12286 			wm_serdes_mediachange(ifp);
   12287 		}
   12288 	}
   12289 
   12290 	wm_tbi_serdes_set_linkled(sc);
   12291 }
   12292 
   12293 /* SFP related */
   12294 
   12295 static int
   12296 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12297 {
   12298 	uint32_t i2ccmd;
   12299 	int i;
   12300 
   12301 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12302 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12303 
   12304 	/* Poll the ready bit */
   12305 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12306 		delay(50);
   12307 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12308 		if (i2ccmd & I2CCMD_READY)
   12309 			break;
   12310 	}
   12311 	if ((i2ccmd & I2CCMD_READY) == 0)
   12312 		return -1;
   12313 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12314 		return -1;
   12315 
   12316 	*data = i2ccmd & 0x00ff;
   12317 
   12318 	return 0;
   12319 }
   12320 
   12321 static uint32_t
   12322 wm_sfp_get_media_type(struct wm_softc *sc)
   12323 {
   12324 	uint32_t ctrl_ext;
   12325 	uint8_t val = 0;
   12326 	int timeout = 3;
   12327 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12328 	int rv = -1;
   12329 
   12330 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12331 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12332 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12333 	CSR_WRITE_FLUSH(sc);
   12334 
   12335 	/* Read SFP module data */
   12336 	while (timeout) {
   12337 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12338 		if (rv == 0)
   12339 			break;
   12340 		delay(100*1000); /* XXX too big */
   12341 		timeout--;
   12342 	}
   12343 	if (rv != 0)
   12344 		goto out;
   12345 	switch (val) {
   12346 	case SFF_SFP_ID_SFF:
   12347 		aprint_normal_dev(sc->sc_dev,
   12348 		    "Module/Connector soldered to board\n");
   12349 		break;
   12350 	case SFF_SFP_ID_SFP:
   12351 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12352 		break;
   12353 	case SFF_SFP_ID_UNKNOWN:
   12354 		goto out;
   12355 	default:
   12356 		break;
   12357 	}
   12358 
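          	/* The Ethernet compliance-code flags identify the media type */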
   12359 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    12360 	if (rv != 0)
    12361 		goto out;
   12363 
   12364 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12365 		mediatype = WM_MEDIATYPE_SERDES;
   12366 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12367 		sc->sc_flags |= WM_F_SGMII;
   12368 		mediatype = WM_MEDIATYPE_COPPER;
   12369 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12370 		sc->sc_flags |= WM_F_SGMII;
   12371 		mediatype = WM_MEDIATYPE_SERDES;
   12372 	}
   12373 
   12374 out:
   12375 	/* Restore I2C interface setting */
   12376 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12377 
   12378 	return mediatype;
   12379 }
   12380 
   12381 /*
   12382  * NVM related.
   12383  * Microwire, SPI (w/wo EERD) and Flash.
   12384  */
   12385 
   12386 /* Both spi and uwire */
   12387 
   12388 /*
   12389  * wm_eeprom_sendbits:
   12390  *
   12391  *	Send a series of bits to the EEPROM.
   12392  */
   12393 static void
   12394 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12395 {
   12396 	uint32_t reg;
   12397 	int x;
   12398 
   12399 	reg = CSR_READ(sc, WMREG_EECD);
   12400 
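          	/* Shift bits out MSB first: present each on DI, then pulse SK */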
   12401 	for (x = nbits; x > 0; x--) {
   12402 		if (bits & (1U << (x - 1)))
   12403 			reg |= EECD_DI;
   12404 		else
   12405 			reg &= ~EECD_DI;
   12406 		CSR_WRITE(sc, WMREG_EECD, reg);
   12407 		CSR_WRITE_FLUSH(sc);
   12408 		delay(2);
   12409 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12410 		CSR_WRITE_FLUSH(sc);
   12411 		delay(2);
   12412 		CSR_WRITE(sc, WMREG_EECD, reg);
   12413 		CSR_WRITE_FLUSH(sc);
   12414 		delay(2);
   12415 	}
   12416 }
   12417 
   12418 /*
   12419  * wm_eeprom_recvbits:
   12420  *
   12421  *	Receive a series of bits from the EEPROM.
   12422  */
   12423 static void
   12424 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12425 {
   12426 	uint32_t reg, val;
   12427 	int x;
   12428 
   12429 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12430 
   12431 	val = 0;
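          	/* Shift bits in MSB first: raise SK, sample DO, then drop SK */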
   12432 	for (x = nbits; x > 0; x--) {
   12433 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12434 		CSR_WRITE_FLUSH(sc);
   12435 		delay(2);
   12436 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12437 			val |= (1U << (x - 1));
   12438 		CSR_WRITE(sc, WMREG_EECD, reg);
   12439 		CSR_WRITE_FLUSH(sc);
   12440 		delay(2);
   12441 	}
   12442 	*valp = val;
   12443 }
   12444 
   12445 /* Microwire */
   12446 
   12447 /*
   12448  * wm_nvm_read_uwire:
   12449  *
   12450  *	Read a word from the EEPROM using the MicroWire protocol.
   12451  */
   12452 static int
   12453 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12454 {
   12455 	uint32_t reg, val;
   12456 	int i;
   12457 
   12458 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12459 		device_xname(sc->sc_dev), __func__));
   12460 
   12461 	if (sc->nvm.acquire(sc) != 0)
   12462 		return -1;
   12463 
   12464 	for (i = 0; i < wordcnt; i++) {
   12465 		/* Clear SK and DI. */
   12466 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12467 		CSR_WRITE(sc, WMREG_EECD, reg);
   12468 
   12469 		/*
   12470 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12471 		 * and Xen.
   12472 		 *
   12473 		 * We use this workaround only for 82540 because qemu's
    12474 		 * e1000 acts as an 82540.
   12475 		 */
   12476 		if (sc->sc_type == WM_T_82540) {
   12477 			reg |= EECD_SK;
   12478 			CSR_WRITE(sc, WMREG_EECD, reg);
   12479 			reg &= ~EECD_SK;
   12480 			CSR_WRITE(sc, WMREG_EECD, reg);
   12481 			CSR_WRITE_FLUSH(sc);
   12482 			delay(2);
   12483 		}
   12484 		/* XXX: end of workaround */
   12485 
   12486 		/* Set CHIP SELECT. */
   12487 		reg |= EECD_CS;
   12488 		CSR_WRITE(sc, WMREG_EECD, reg);
   12489 		CSR_WRITE_FLUSH(sc);
   12490 		delay(2);
   12491 
   12492 		/* Shift in the READ command. */
   12493 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12494 
   12495 		/* Shift in address. */
   12496 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12497 
   12498 		/* Shift out the data. */
   12499 		wm_eeprom_recvbits(sc, &val, 16);
   12500 		data[i] = val & 0xffff;
   12501 
   12502 		/* Clear CHIP SELECT. */
   12503 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12504 		CSR_WRITE(sc, WMREG_EECD, reg);
   12505 		CSR_WRITE_FLUSH(sc);
   12506 		delay(2);
   12507 	}
   12508 
   12509 	sc->nvm.release(sc);
   12510 	return 0;
   12511 }
   12512 
   12513 /* SPI */
   12514 
   12515 /*
   12516  * Set SPI and FLASH related information from the EECD register.
   12517  * For 82541 and 82547, the word size is taken from EEPROM.
   12518  */
   12519 static int
   12520 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12521 {
   12522 	int size;
   12523 	uint32_t reg;
   12524 	uint16_t data;
   12525 
   12526 	reg = CSR_READ(sc, WMREG_EECD);
   12527 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12528 
   12529 	/* Read the size of NVM from EECD by default */
   12530 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12531 	switch (sc->sc_type) {
   12532 	case WM_T_82541:
   12533 	case WM_T_82541_2:
   12534 	case WM_T_82547:
   12535 	case WM_T_82547_2:
   12536 		/* Set dummy value to access EEPROM */
   12537 		sc->sc_nvm_wordsize = 64;
    12538 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
    12539 			aprint_error_dev(sc->sc_dev,
    12540 			    "%s: failed to read EEPROM size\n", __func__);
          			return -1; /* Don't use the uninitialized data */
    12541 		}
   12542 		reg = data;
   12543 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12544 		if (size == 0)
   12545 			size = 6; /* 64 word size */
   12546 		else
   12547 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12548 		break;
   12549 	case WM_T_80003:
   12550 	case WM_T_82571:
   12551 	case WM_T_82572:
   12552 	case WM_T_82573: /* SPI case */
   12553 	case WM_T_82574: /* SPI case */
   12554 	case WM_T_82583: /* SPI case */
   12555 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12556 		if (size > 14)
   12557 			size = 14;
   12558 		break;
   12559 	case WM_T_82575:
   12560 	case WM_T_82576:
   12561 	case WM_T_82580:
   12562 	case WM_T_I350:
   12563 	case WM_T_I354:
   12564 	case WM_T_I210:
   12565 	case WM_T_I211:
   12566 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12567 		if (size > 15)
   12568 			size = 15;
   12569 		break;
   12570 	default:
   12571 		aprint_error_dev(sc->sc_dev,
   12572 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12573 		return -1;
   12575 	}
   12576 
   12577 	sc->sc_nvm_wordsize = 1 << size;
   12578 
   12579 	return 0;
   12580 }
   12581 
   12582 /*
   12583  * wm_nvm_ready_spi:
   12584  *
   12585  *	Wait for a SPI EEPROM to be ready for commands.
   12586  */
   12587 static int
   12588 wm_nvm_ready_spi(struct wm_softc *sc)
   12589 {
   12590 	uint32_t val;
   12591 	int usec;
   12592 
   12593 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12594 		device_xname(sc->sc_dev), __func__));
   12595 
   12596 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12597 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12598 		wm_eeprom_recvbits(sc, &val, 8);
   12599 		if ((val & SPI_SR_RDY) == 0)
   12600 			break;
   12601 	}
   12602 	if (usec >= SPI_MAX_RETRIES) {
    12603 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   12604 		return -1;
   12605 	}
   12606 	return 0;
   12607 }
   12608 
   12609 /*
   12610  * wm_nvm_read_spi:
   12611  *
    12612  *	Read a word from the EEPROM using the SPI protocol.
   12613  */
   12614 static int
   12615 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12616 {
   12617 	uint32_t reg, val;
   12618 	int i;
   12619 	uint8_t opc;
   12620 	int rv = 0;
   12621 
   12622 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12623 		device_xname(sc->sc_dev), __func__));
   12624 
   12625 	if (sc->nvm.acquire(sc) != 0)
   12626 		return -1;
   12627 
   12628 	/* Clear SK and CS. */
   12629 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12630 	CSR_WRITE(sc, WMREG_EECD, reg);
   12631 	CSR_WRITE_FLUSH(sc);
   12632 	delay(2);
   12633 
   12634 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12635 		goto out;
   12636 
   12637 	/* Toggle CS to flush commands. */
   12638 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12639 	CSR_WRITE_FLUSH(sc);
   12640 	delay(2);
   12641 	CSR_WRITE(sc, WMREG_EECD, reg);
   12642 	CSR_WRITE_FLUSH(sc);
   12643 	delay(2);
   12644 
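          	/*
          	 * Parts with 8-bit addressing carry the ninth address bit (A8)
          	 * in the opcode; the word address is shifted left by one below
          	 * to form the byte address sent on the wire.
          	 */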
   12645 	opc = SPI_OPC_READ;
   12646 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12647 		opc |= SPI_OPC_A8;
   12648 
   12649 	wm_eeprom_sendbits(sc, opc, 8);
   12650 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12651 
   12652 	for (i = 0; i < wordcnt; i++) {
   12653 		wm_eeprom_recvbits(sc, &val, 16);
   12654 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12655 	}
   12656 
   12657 	/* Raise CS and clear SK. */
   12658 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12659 	CSR_WRITE(sc, WMREG_EECD, reg);
   12660 	CSR_WRITE_FLUSH(sc);
   12661 	delay(2);
   12662 
   12663 out:
   12664 	sc->nvm.release(sc);
   12665 	return rv;
   12666 }
   12667 
   12668 /* Using with EERD */
   12669 
   12670 static int
   12671 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12672 {
   12673 	uint32_t attempts = 100000;
   12674 	uint32_t i, reg = 0;
   12675 	int32_t done = -1;
   12676 
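          	/*
          	 * 'rw' selects which register to poll; the driver relies on
          	 * EERD and EEWR sharing the DONE bit position.
          	 */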
   12677 	for (i = 0; i < attempts; i++) {
   12678 		reg = CSR_READ(sc, rw);
   12679 
   12680 		if (reg & EERD_DONE) {
   12681 			done = 0;
   12682 			break;
   12683 		}
   12684 		delay(5);
   12685 	}
   12686 
   12687 	return done;
   12688 }
   12689 
   12690 static int
   12691 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12692 {
   12693 	int i, eerd = 0;
   12694 	int rv = 0;
   12695 
   12696 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12697 		device_xname(sc->sc_dev), __func__));
   12698 
   12699 	if (sc->nvm.acquire(sc) != 0)
   12700 		return -1;
   12701 
   12702 	for (i = 0; i < wordcnt; i++) {
   12703 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12704 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12705 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12706 		if (rv != 0) {
   12707 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12708 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12709 			break;
   12710 		}
   12711 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12712 	}
   12713 
   12714 	sc->nvm.release(sc);
   12715 	return rv;
   12716 }
   12717 
   12718 /* Flash */
   12719 
   12720 static int
   12721 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12722 {
   12723 	uint32_t eecd;
   12724 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12725 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12726 	uint32_t nvm_dword = 0;
   12727 	uint8_t sig_byte = 0;
   12728 	int rv;
   12729 
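          	/*
          	 * Each flash bank carries a signature in the high byte of word
          	 * ICH_NVM_SIG_WORD; the byte offsets above point at that byte.
          	 */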
   12730 	switch (sc->sc_type) {
   12731 	case WM_T_PCH_SPT:
   12732 	case WM_T_PCH_CNP:
   12733 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12734 		act_offset = ICH_NVM_SIG_WORD * 2;
   12735 
   12736 		/* Set bank to 0 in case flash read fails. */
   12737 		*bank = 0;
   12738 
   12739 		/* Check bank 0 */
   12740 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12741 		if (rv != 0)
   12742 			return rv;
   12743 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12744 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12745 			*bank = 0;
   12746 			return 0;
   12747 		}
   12748 
   12749 		/* Check bank 1 */
    12750 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12751 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12752 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12753 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12754 			*bank = 1;
   12755 			return 0;
   12756 		}
   12757 		aprint_error_dev(sc->sc_dev,
   12758 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12759 		return -1;
   12760 	case WM_T_ICH8:
   12761 	case WM_T_ICH9:
   12762 		eecd = CSR_READ(sc, WMREG_EECD);
   12763 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12764 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12765 			return 0;
   12766 		}
   12767 		/* FALLTHROUGH */
   12768 	default:
   12769 		/* Default to 0 */
   12770 		*bank = 0;
   12771 
   12772 		/* Check bank 0 */
   12773 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12774 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12775 			*bank = 0;
   12776 			return 0;
   12777 		}
   12778 
   12779 		/* Check bank 1 */
   12780 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12781 		    &sig_byte);
   12782 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12783 			*bank = 1;
   12784 			return 0;
   12785 		}
   12786 	}
   12787 
   12788 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12789 		device_xname(sc->sc_dev)));
   12790 	return -1;
   12791 }
   12792 
   12793 /******************************************************************************
   12794  * This function does initial flash setup so that a new read/write/erase cycle
   12795  * can be started.
   12796  *
   12797  * sc - The pointer to the hw structure
   12798  ****************************************************************************/
   12799 static int32_t
   12800 wm_ich8_cycle_init(struct wm_softc *sc)
   12801 {
   12802 	uint16_t hsfsts;
   12803 	int32_t error = 1;
    12804 	int32_t i = 0;
   12805 
   12806 	if (sc->sc_type >= WM_T_PCH_SPT)
   12807 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12808 	else
   12809 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12810 
    12811 	/* Check the Flash Descriptor Valid bit in HW status */
   12812 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12813 		return error;
   12814 
    12815 	/* Clear FCERR and DAEL in HW status by writing 1 to each */
   12817 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12818 
   12819 	if (sc->sc_type >= WM_T_PCH_SPT)
   12820 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12821 	else
   12822 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12823 
    12824 	/*
    12825 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12826 	 * check against in order to start a new cycle, or the FDONE bit
    12827 	 * should be changed in the hardware so that it reads as 1 after a
    12828 	 * hardware reset, which could then be used to tell whether a cycle
    12829 	 * is in progress or has completed. We should also have some software
    12830 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
    12831 	 * that accesses to those bits by two threads are serialized, or some
    12832 	 * way to keep two threads from starting a cycle at the same time.
    12833 	 */
   12834 
   12835 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12836 		/*
   12837 		 * There is no cycle running at present, so we can start a
   12838 		 * cycle
   12839 		 */
   12840 
   12841 		/* Begin by setting Flash Cycle Done. */
   12842 		hsfsts |= HSFSTS_DONE;
   12843 		if (sc->sc_type >= WM_T_PCH_SPT)
   12844 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12845 			    hsfsts & 0xffffUL);
   12846 		else
   12847 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12848 		error = 0;
   12849 	} else {
   12850 		/*
    12851 		 * Otherwise poll for some time so the current cycle has a
   12852 		 * chance to end before giving up.
   12853 		 */
   12854 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12855 			if (sc->sc_type >= WM_T_PCH_SPT)
   12856 				hsfsts = ICH8_FLASH_READ32(sc,
   12857 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12858 			else
   12859 				hsfsts = ICH8_FLASH_READ16(sc,
   12860 				    ICH_FLASH_HSFSTS);
   12861 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12862 				error = 0;
   12863 				break;
   12864 			}
   12865 			delay(1);
   12866 		}
   12867 		if (error == 0) {
   12868 			/*
    12869 			 * The previous cycle completed within the timeout,
    12870 			 * so set the Flash Cycle Done for the new one.
   12871 			 */
   12872 			hsfsts |= HSFSTS_DONE;
   12873 			if (sc->sc_type >= WM_T_PCH_SPT)
   12874 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12875 				    hsfsts & 0xffffUL);
   12876 			else
   12877 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12878 				    hsfsts);
   12879 		}
   12880 	}
   12881 	return error;
   12882 }
   12883 
   12884 /******************************************************************************
   12885  * This function starts a flash cycle and waits for its completion
   12886  *
   12887  * sc - The pointer to the hw structure
   12888  ****************************************************************************/
   12889 static int32_t
   12890 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12891 {
   12892 	uint16_t hsflctl;
   12893 	uint16_t hsfsts;
   12894 	int32_t error = 1;
   12895 	uint32_t i = 0;
   12896 
   12897 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12898 	if (sc->sc_type >= WM_T_PCH_SPT)
   12899 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12900 	else
   12901 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12902 	hsflctl |= HSFCTL_GO;
   12903 	if (sc->sc_type >= WM_T_PCH_SPT)
   12904 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12905 		    (uint32_t)hsflctl << 16);
   12906 	else
   12907 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12908 
    12909 	/* Wait until the FDONE bit is set */
   12910 	do {
   12911 		if (sc->sc_type >= WM_T_PCH_SPT)
   12912 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12913 			    & 0xffffUL;
   12914 		else
   12915 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12916 		if (hsfsts & HSFSTS_DONE)
   12917 			break;
   12918 		delay(1);
   12919 		i++;
   12920 	} while (i < timeout);
    12921 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   12922 		error = 0;
   12923 
   12924 	return error;
   12925 }
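
/*
 * Editor's sketch (not part of the driver): on PCH_SPT and newer, HSFSTS
 * and HSFCTL are fused into one 32-bit register in LAN memory space, with
 * HSFSTS in the low 16 bits and HSFCTL in the high 16 bits; that is why
 * every HSFCTL access above is widened to a 32-bit access at
 * ICH_FLASH_HSFSTS.  A hypothetical helper capturing the pattern:
 */
static inline uint16_t
wm_read_hsfctl_sketch(struct wm_softc *sc)
{

	if (sc->sc_type >= WM_T_PCH_SPT)
		return (uint16_t)(ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
		    >> 16);
	return (uint16_t)ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
}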
   12926 
   12927 /******************************************************************************
   12928  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12929  *
   12930  * sc - The pointer to the hw structure
   12931  * index - The index of the byte or word to read.
   12932  * size - Size of data to read, 1=byte 2=word, 4=dword
   12933  * data - Pointer to the word to store the value read.
   12934  *****************************************************************************/
   12935 static int32_t
   12936 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12937     uint32_t size, uint32_t *data)
   12938 {
   12939 	uint16_t hsfsts;
   12940 	uint16_t hsflctl;
   12941 	uint32_t flash_linear_address;
   12942 	uint32_t flash_data = 0;
   12943 	int32_t error = 1;
   12944 	int32_t count = 0;
   12945 
    12946 	if (size < 1 || size > 4 || data == NULL ||
   12947 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12948 		return error;
   12949 
   12950 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12951 	    sc->sc_ich8_flash_base;
   12952 
   12953 	do {
   12954 		delay(1);
   12955 		/* Steps */
   12956 		error = wm_ich8_cycle_init(sc);
   12957 		if (error)
   12958 			break;
   12959 
   12960 		if (sc->sc_type >= WM_T_PCH_SPT)
   12961 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12962 			    >> 16;
   12963 		else
   12964 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12965 		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes). */
   12966 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12967 		    & HSFCTL_BCOUNT_MASK;
   12968 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12969 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12970 			/*
    12971 			 * In SPT, this register is in LAN memory space,
    12972 			 * not flash, so only 32-bit access is supported.
   12973 			 */
   12974 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12975 			    (uint32_t)hsflctl << 16);
   12976 		} else
   12977 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12978 
   12979 		/*
   12980 		 * Write the last 24 bits of index into Flash Linear address
   12981 		 * field in Flash Address
   12982 		 */
   12983 		/* TODO: TBD maybe check the index against the size of flash */
   12984 
   12985 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12986 
   12987 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12988 
   12989 		/*
    12990 		 * If FCERR is set, clear it and retry the whole
    12991 		 * sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT times;
    12992 		 * otherwise read the returned data from Flash Data0,
    12993 		 * least significant byte first.
   12994 		 */
   12995 		if (error == 0) {
   12996 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12997 			if (size == 1)
   12998 				*data = (uint8_t)(flash_data & 0x000000FF);
   12999 			else if (size == 2)
   13000 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13001 			else if (size == 4)
   13002 				*data = (uint32_t)flash_data;
   13003 			break;
   13004 		} else {
   13005 			/*
   13006 			 * If we've gotten here, then things are probably
   13007 			 * completely hosed, but if the error condition is
   13008 			 * detected, it won't hurt to give it another try...
   13009 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13010 			 */
   13011 			if (sc->sc_type >= WM_T_PCH_SPT)
   13012 				hsfsts = ICH8_FLASH_READ32(sc,
   13013 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13014 			else
   13015 				hsfsts = ICH8_FLASH_READ16(sc,
   13016 				    ICH_FLASH_HSFSTS);
   13017 
   13018 			if (hsfsts & HSFSTS_ERR) {
   13019 				/* Repeat for some time before giving up. */
   13020 				continue;
   13021 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13022 				break;
   13023 		}
   13024 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13025 
   13026 	return error;
   13027 }
   13028 
   13029 /******************************************************************************
   13030  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13031  *
    13032  * sc - pointer to the wm_softc structure
   13033  * index - The index of the byte to read.
   13034  * data - Pointer to a byte to store the value read.
   13035  *****************************************************************************/
   13036 static int32_t
   13037 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13038 {
   13039 	int32_t status;
   13040 	uint32_t word = 0;
   13041 
   13042 	status = wm_read_ich8_data(sc, index, 1, &word);
   13043 	if (status == 0)
   13044 		*data = (uint8_t)word;
   13045 	else
   13046 		*data = 0;
   13047 
   13048 	return status;
   13049 }
   13050 
   13051 /******************************************************************************
   13052  * Reads a word from the NVM using the ICH8 flash access registers.
   13053  *
    13054  * sc - pointer to the wm_softc structure
   13055  * index - The starting byte index of the word to read.
   13056  * data - Pointer to a word to store the value read.
   13057  *****************************************************************************/
   13058 static int32_t
   13059 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13060 {
   13061 	int32_t status;
   13062 	uint32_t word = 0;
   13063 
   13064 	status = wm_read_ich8_data(sc, index, 2, &word);
   13065 	if (status == 0)
   13066 		*data = (uint16_t)word;
   13067 	else
   13068 		*data = 0;
   13069 
   13070 	return status;
   13071 }
   13072 
   13073 /******************************************************************************
   13074  * Reads a dword from the NVM using the ICH8 flash access registers.
   13075  *
    13076  * sc - pointer to the wm_softc structure
    13077  * index - The starting byte index of the dword to read.
    13078  * data - Pointer to a dword to store the value read.
   13079  *****************************************************************************/
   13080 static int32_t
   13081 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13082 {
   13083 	int32_t status;
   13084 
   13085 	status = wm_read_ich8_data(sc, index, 4, data);
   13086 	return status;
   13087 }
   13088 
   13089 /******************************************************************************
   13090  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13091  * register.
   13092  *
   13093  * sc - Struct containing variables accessed by shared code
   13094  * offset - offset of word in the EEPROM to read
   13095  * data - word read from the EEPROM
   13096  * words - number of words to read
   13097  *****************************************************************************/
   13098 static int
   13099 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13100 {
   13101 	int32_t	 rv = 0;
   13102 	uint32_t flash_bank = 0;
   13103 	uint32_t act_offset = 0;
   13104 	uint32_t bank_offset = 0;
   13105 	uint16_t word = 0;
   13106 	uint16_t i = 0;
   13107 
   13108 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13109 		device_xname(sc->sc_dev), __func__));
   13110 
   13111 	if (sc->nvm.acquire(sc) != 0)
   13112 		return -1;
   13113 
   13114 	/*
   13115 	 * We need to know which is the valid flash bank.  In the event
   13116 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13117 	 * managing flash_bank. So it cannot be trusted and needs
   13118 	 * to be updated with each read.
   13119 	 */
   13120 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13121 	if (rv) {
   13122 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13123 			device_xname(sc->sc_dev)));
   13124 		flash_bank = 0;
   13125 	}
   13126 
   13127 	/*
    13128 	 * Adjust the offset if we're on bank 1; the bank size is in
    13129 	 * words, so multiply by 2 for the byte offset.
   13130 	 */
   13131 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
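	/*
	 * Worked example (editor's note): with a hypothetical bank size of
	 * 0x1000 words, bank 1 starts at byte offset 0x1000 * 2 = 0x2000.
	 */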
   13132 
   13133 	for (i = 0; i < words; i++) {
   13134 		/* The NVM part needs a byte offset, hence * 2 */
   13135 		act_offset = bank_offset + ((offset + i) * 2);
   13136 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13137 		if (rv) {
   13138 			aprint_error_dev(sc->sc_dev,
   13139 			    "%s: failed to read NVM\n", __func__);
   13140 			break;
   13141 		}
   13142 		data[i] = word;
   13143 	}
   13144 
   13145 	sc->nvm.release(sc);
   13146 	return rv;
   13147 }
   13148 
   13149 /******************************************************************************
   13150  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13151  * register.
   13152  *
   13153  * sc - Struct containing variables accessed by shared code
   13154  * offset - offset of word in the EEPROM to read
   13155  * data - word read from the EEPROM
   13156  * words - number of words to read
   13157  *****************************************************************************/
   13158 static int
   13159 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13160 {
   13161 	int32_t	 rv = 0;
   13162 	uint32_t flash_bank = 0;
   13163 	uint32_t act_offset = 0;
   13164 	uint32_t bank_offset = 0;
   13165 	uint32_t dword = 0;
   13166 	uint16_t i = 0;
   13167 
   13168 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13169 		device_xname(sc->sc_dev), __func__));
   13170 
   13171 	if (sc->nvm.acquire(sc) != 0)
   13172 		return -1;
   13173 
   13174 	/*
   13175 	 * We need to know which is the valid flash bank.  In the event
   13176 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13177 	 * managing flash_bank. So it cannot be trusted and needs
   13178 	 * to be updated with each read.
   13179 	 */
   13180 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13181 	if (rv) {
   13182 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13183 			device_xname(sc->sc_dev)));
   13184 		flash_bank = 0;
   13185 	}
   13186 
   13187 	/*
    13188 	 * Adjust the offset if we're on bank 1; the bank size is in
    13189 	 * words, so multiply by 2 for the byte offset.
   13190 	 */
   13191 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13192 
   13193 	for (i = 0; i < words; i++) {
   13194 		/* The NVM part needs a byte offset, hence * 2 */
   13195 		act_offset = bank_offset + ((offset + i) * 2);
   13196 		/* but we must read dword aligned, so mask ... */
   13197 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13198 		if (rv) {
   13199 			aprint_error_dev(sc->sc_dev,
   13200 			    "%s: failed to read NVM\n", __func__);
   13201 			break;
   13202 		}
   13203 		/* ... and pick out low or high word */
   13204 		if ((act_offset & 0x2) == 0)
   13205 			data[i] = (uint16_t)(dword & 0xFFFF);
   13206 		else
   13207 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13208 	}
   13209 
   13210 	sc->nvm.release(sc);
   13211 	return rv;
   13212 }
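
/*
 * Editor's sketch (not driver code): the word-from-dword extraction used
 * above, factored into a hypothetical helper.  E.g. act_offset 0x2006
 * reads the dword at 0x2004 and takes its high 16 bits.
 */
static inline uint16_t
wm_nvm_word_from_dword_sketch(uint32_t dword, uint32_t byte_offset)
{

	/* Byte offsets 0/1 live in the low word, 2/3 in the high word */
	if ((byte_offset & 0x2) == 0)
		return (uint16_t)(dword & 0xffff);
	return (uint16_t)((dword >> 16) & 0xffff);
}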
   13213 
   13214 /* iNVM */
   13215 
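/*
 * Editor's note: each iNVM dword is a self-describing record.  Judging
 * from the INVM_DWORD_TO_* macros used below (and assuming the usual
 * i210/i211 layout), the low bits of a dword hold the record type, a
 * word-autoload record carries the target word address in a middle
 * field, and the 16 bits of payload data sit in the upper half.
 */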
   13216 static int
   13217 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13218 {
   13219 	int32_t	 rv = 0;
   13220 	uint32_t invm_dword;
   13221 	uint16_t i;
   13222 	uint8_t record_type, word_address;
   13223 
   13224 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13225 		device_xname(sc->sc_dev), __func__));
   13226 
   13227 	for (i = 0; i < INVM_SIZE; i++) {
   13228 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13229 		/* Get record type */
   13230 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13231 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13232 			break;
   13233 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13234 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13235 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13236 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13237 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13238 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13239 			if (word_address == address) {
   13240 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13241 				rv = 0;
   13242 				break;
   13243 			}
   13244 		}
   13245 	}
   13246 
   13247 	return rv;
   13248 }
   13249 
   13250 static int
   13251 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13252 {
   13253 	int rv = 0;
   13254 	int i;
   13255 
   13256 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13257 		device_xname(sc->sc_dev), __func__));
   13258 
   13259 	if (sc->nvm.acquire(sc) != 0)
   13260 		return -1;
   13261 
   13262 	for (i = 0; i < words; i++) {
   13263 		switch (offset + i) {
   13264 		case NVM_OFF_MACADDR:
   13265 		case NVM_OFF_MACADDR1:
   13266 		case NVM_OFF_MACADDR2:
   13267 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13268 			if (rv != 0) {
   13269 				data[i] = 0xffff;
   13270 				rv = -1;
   13271 			}
   13272 			break;
   13273 		case NVM_OFF_CFG2:
   13274 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13275 			if (rv != 0) {
   13276 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13277 				rv = 0;
   13278 			}
   13279 			break;
   13280 		case NVM_OFF_CFG4:
   13281 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13282 			if (rv != 0) {
   13283 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13284 				rv = 0;
   13285 			}
   13286 			break;
   13287 		case NVM_OFF_LED_1_CFG:
   13288 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13289 			if (rv != 0) {
   13290 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13291 				rv = 0;
   13292 			}
   13293 			break;
   13294 		case NVM_OFF_LED_0_2_CFG:
   13295 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13296 			if (rv != 0) {
   13297 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13298 				rv = 0;
   13299 			}
   13300 			break;
   13301 		case NVM_OFF_ID_LED_SETTINGS:
   13302 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13303 			if (rv != 0) {
   13304 				*data = ID_LED_RESERVED_FFFF;
   13305 				rv = 0;
   13306 			}
   13307 			break;
   13308 		default:
   13309 			DPRINTF(WM_DEBUG_NVM,
   13310 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13311 			*data = NVM_RESERVED_WORD;
   13312 			break;
   13313 		}
   13314 	}
   13315 
   13316 	sc->nvm.release(sc);
   13317 	return rv;
   13318 }
   13319 
   13320 /* Lock, detecting NVM type, validate checksum, version and read */
   13321 
   13322 static int
   13323 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13324 {
   13325 	uint32_t eecd = 0;
   13326 
   13327 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13328 	    || sc->sc_type == WM_T_82583) {
   13329 		eecd = CSR_READ(sc, WMREG_EECD);
   13330 
   13331 		/* Isolate bits 15 & 16 */
   13332 		eecd = ((eecd >> 15) & 0x03);
   13333 
   13334 		/* If both bits are set, device is Flash type */
   13335 		if (eecd == 0x03)
   13336 			return 0;
   13337 	}
   13338 	return 1;
   13339 }
   13340 
   13341 static int
   13342 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13343 {
   13344 	uint32_t eec;
   13345 
   13346 	eec = CSR_READ(sc, WMREG_EEC);
   13347 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13348 		return 1;
   13349 
   13350 	return 0;
   13351 }
   13352 
   13353 /*
   13354  * wm_nvm_validate_checksum
   13355  *
    13356  * The sum of the first 64 16-bit words must equal NVM_CHECKSUM.
   13357  */
   13358 static int
   13359 wm_nvm_validate_checksum(struct wm_softc *sc)
   13360 {
   13361 	uint16_t checksum;
   13362 	uint16_t eeprom_data;
   13363 #ifdef WM_DEBUG
   13364 	uint16_t csum_wordaddr, valid_checksum;
   13365 #endif
   13366 	int i;
   13367 
   13368 	checksum = 0;
   13369 
   13370 	/* Don't check for I211 */
   13371 	if (sc->sc_type == WM_T_I211)
   13372 		return 0;
   13373 
   13374 #ifdef WM_DEBUG
   13375 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13376 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13377 		csum_wordaddr = NVM_OFF_COMPAT;
   13378 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13379 	} else {
   13380 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13381 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13382 	}
   13383 
   13384 	/* Dump EEPROM image for debug */
   13385 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13386 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13387 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13388 		/* XXX PCH_SPT? */
   13389 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13390 		if ((eeprom_data & valid_checksum) == 0)
   13391 			DPRINTF(WM_DEBUG_NVM,
    13392 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13393 				device_xname(sc->sc_dev), eeprom_data,
   13394 				    valid_checksum));
   13395 	}
   13396 
   13397 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13398 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13399 		for (i = 0; i < NVM_SIZE; i++) {
   13400 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13401 				printf("XXXX ");
   13402 			else
   13403 				printf("%04hx ", eeprom_data);
   13404 			if (i % 8 == 7)
   13405 				printf("\n");
   13406 		}
   13407 	}
   13408 
   13409 #endif /* WM_DEBUG */
   13410 
   13411 	for (i = 0; i < NVM_SIZE; i++) {
   13412 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13413 			return 1;
   13414 		checksum += eeprom_data;
   13415 	}
   13416 
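	/*
	 * Note (editor): a mismatch is deliberately not treated as fatal
	 * here; it is only reported when WM_DEBUG is enabled.
	 */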
   13417 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13418 #ifdef WM_DEBUG
   13419 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13420 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13421 #endif
   13422 	}
   13423 
   13424 	return 0;
   13425 }
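
/*
 * Editor's sketch (not driver code): how the checksum word would be
 * recomputed when rewriting the NVM.  This assumes the checksum word is
 * the last of the NVM_SIZE words, as on typical parts; verify against
 * the datasheet before relying on it.
 */
static int
wm_nvm_recompute_checksum_sketch(struct wm_softc *sc, uint16_t *csump)
{
	uint16_t sum = 0, word;
	int i;

	for (i = 0; i < NVM_SIZE - 1; i++) {
		if (wm_nvm_read(sc, i, 1, &word))
			return 1;
		sum += word;
	}
	/* Choose the checksum word so the total equals NVM_CHECKSUM */
	*csump = (uint16_t)(NVM_CHECKSUM - sum);
	return 0;
}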
   13426 
   13427 static void
   13428 wm_nvm_version_invm(struct wm_softc *sc)
   13429 {
   13430 	uint32_t dword;
   13431 
   13432 	/*
    13433 	 * Linux's code to decode the version is very strange, so we don't
    13434 	 * follow that algorithm and just use word 61 as the documentation
    13435 	 * describes.  It may not be perfect, though...
   13436 	 *
   13437 	 * Example:
   13438 	 *
   13439 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13440 	 */
   13441 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13442 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13443 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13444 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13445 }
   13446 
   13447 static void
   13448 wm_nvm_version(struct wm_softc *sc)
   13449 {
   13450 	uint16_t major, minor, build, patch;
   13451 	uint16_t uid0, uid1;
   13452 	uint16_t nvm_data;
   13453 	uint16_t off;
   13454 	bool check_version = false;
   13455 	bool check_optionrom = false;
   13456 	bool have_build = false;
   13457 	bool have_uid = true;
   13458 
   13459 	/*
   13460 	 * Version format:
   13461 	 *
   13462 	 * XYYZ
   13463 	 * X0YZ
   13464 	 * X0YY
   13465 	 *
   13466 	 * Example:
   13467 	 *
   13468 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13469 	 *	82571	0x50a6	5.10.6?
   13470 	 *	82572	0x506a	5.6.10?
   13471 	 *	82572EI	0x5069	5.6.9?
   13472 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13473 	 *		0x2013	2.1.3?
   13474 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13475 	 */
   13476 
   13477 	/*
   13478 	 * XXX
    13479 	 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13480 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13481 	 */
   13482 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13483 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13484 		have_uid = false;
   13485 
   13486 	switch (sc->sc_type) {
   13487 	case WM_T_82571:
   13488 	case WM_T_82572:
   13489 	case WM_T_82574:
   13490 	case WM_T_82583:
   13491 		check_version = true;
   13492 		check_optionrom = true;
   13493 		have_build = true;
   13494 		break;
   13495 	case WM_T_82575:
   13496 	case WM_T_82576:
   13497 	case WM_T_82580:
   13498 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13499 			check_version = true;
   13500 		break;
   13501 	case WM_T_I211:
   13502 		wm_nvm_version_invm(sc);
   13503 		have_uid = false;
   13504 		goto printver;
   13505 	case WM_T_I210:
   13506 		if (!wm_nvm_flash_presence_i210(sc)) {
   13507 			wm_nvm_version_invm(sc);
   13508 			have_uid = false;
   13509 			goto printver;
   13510 		}
   13511 		/* FALLTHROUGH */
   13512 	case WM_T_I350:
   13513 	case WM_T_I354:
   13514 		check_version = true;
   13515 		check_optionrom = true;
   13516 		break;
   13517 	default:
   13518 		return;
   13519 	}
   13520 	if (check_version
   13521 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13522 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13523 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13524 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13525 			build = nvm_data & NVM_BUILD_MASK;
   13526 			have_build = true;
   13527 		} else
   13528 			minor = nvm_data & 0x00ff;
   13529 
   13530 		/* Decimal */
   13531 		minor = (minor / 16) * 10 + (minor % 16);
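		/* E.g. (editor's note): raw 0x0a -> 10, raw 0x15 -> 15. */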
   13532 		sc->sc_nvm_ver_major = major;
   13533 		sc->sc_nvm_ver_minor = minor;
   13534 
   13535 printver:
   13536 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13537 		    sc->sc_nvm_ver_minor);
   13538 		if (have_build) {
   13539 			sc->sc_nvm_ver_build = build;
   13540 			aprint_verbose(".%d", build);
   13541 		}
   13542 	}
   13543 
    13544 	/* Assume the Option ROM area is above NVM_SIZE */
   13545 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13546 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13547 		/* Option ROM Version */
   13548 		if ((off != 0x0000) && (off != 0xffff)) {
   13549 			int rv;
   13550 
   13551 			off += NVM_COMBO_VER_OFF;
   13552 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13553 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13554 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13555 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13556 				/* 16bits */
   13557 				major = uid0 >> 8;
   13558 				build = (uid0 << 8) | (uid1 >> 8);
   13559 				patch = uid1 & 0x00ff;
   13560 				aprint_verbose(", option ROM Version %d.%d.%d",
   13561 				    major, build, patch);
   13562 			}
   13563 		}
   13564 	}
   13565 
   13566 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13567 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13568 }
   13569 
   13570 /*
   13571  * wm_nvm_read:
   13572  *
   13573  *	Read data from the serial EEPROM.
   13574  */
   13575 static int
   13576 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13577 {
   13578 	int rv;
   13579 
   13580 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13581 		device_xname(sc->sc_dev), __func__));
   13582 
   13583 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13584 		return -1;
   13585 
   13586 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13587 
   13588 	return rv;
   13589 }
   13590 
   13591 /*
   13592  * Hardware semaphores.
    13593  * Very complex...
   13594  */
   13595 
   13596 static int
   13597 wm_get_null(struct wm_softc *sc)
   13598 {
   13599 
   13600 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13601 		device_xname(sc->sc_dev), __func__));
   13602 	return 0;
   13603 }
   13604 
   13605 static void
   13606 wm_put_null(struct wm_softc *sc)
   13607 {
   13608 
   13609 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13610 		device_xname(sc->sc_dev), __func__));
   13611 	return;
   13612 }
   13613 
   13614 static int
   13615 wm_get_eecd(struct wm_softc *sc)
   13616 {
   13617 	uint32_t reg;
   13618 	int x;
   13619 
   13620 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13621 		device_xname(sc->sc_dev), __func__));
   13622 
   13623 	reg = CSR_READ(sc, WMREG_EECD);
   13624 
   13625 	/* Request EEPROM access. */
   13626 	reg |= EECD_EE_REQ;
   13627 	CSR_WRITE(sc, WMREG_EECD, reg);
   13628 
    13629 	/* ... and wait for it to be granted. */
   13630 	for (x = 0; x < 1000; x++) {
   13631 		reg = CSR_READ(sc, WMREG_EECD);
   13632 		if (reg & EECD_EE_GNT)
   13633 			break;
   13634 		delay(5);
   13635 	}
   13636 	if ((reg & EECD_EE_GNT) == 0) {
   13637 		aprint_error_dev(sc->sc_dev,
   13638 		    "could not acquire EEPROM GNT\n");
   13639 		reg &= ~EECD_EE_REQ;
   13640 		CSR_WRITE(sc, WMREG_EECD, reg);
   13641 		return -1;
   13642 	}
   13643 
   13644 	return 0;
   13645 }
   13646 
   13647 static void
   13648 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13649 {
   13650 
   13651 	*eecd |= EECD_SK;
   13652 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13653 	CSR_WRITE_FLUSH(sc);
   13654 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13655 		delay(1);
   13656 	else
   13657 		delay(50);
   13658 }
   13659 
   13660 static void
   13661 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13662 {
   13663 
   13664 	*eecd &= ~EECD_SK;
   13665 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13666 	CSR_WRITE_FLUSH(sc);
   13667 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13668 		delay(1);
   13669 	else
   13670 		delay(50);
   13671 }
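
/*
 * Editor's sketch (not driver code): how one data bit is typically
 * clocked out with the two helpers above; the driver's actual shift-out
 * routine lives elsewhere in this file.
 */
static void
wm_nvm_shift_out_bit_sketch(struct wm_softc *sc, int bit)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
	if (bit)
		reg |= EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, reg);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, &reg);
	wm_nvm_eec_clock_lower(sc, &reg);
}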
   13672 
   13673 static void
   13674 wm_put_eecd(struct wm_softc *sc)
   13675 {
   13676 	uint32_t reg;
   13677 
   13678 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13679 		device_xname(sc->sc_dev), __func__));
   13680 
   13681 	/* Stop nvm */
   13682 	reg = CSR_READ(sc, WMREG_EECD);
   13683 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13684 		/* Pull CS high */
   13685 		reg |= EECD_CS;
   13686 		wm_nvm_eec_clock_lower(sc, &reg);
   13687 	} else {
   13688 		/* CS on Microwire is active-high */
   13689 		reg &= ~(EECD_CS | EECD_DI);
   13690 		CSR_WRITE(sc, WMREG_EECD, reg);
   13691 		wm_nvm_eec_clock_raise(sc, &reg);
   13692 		wm_nvm_eec_clock_lower(sc, &reg);
   13693 	}
   13694 
   13695 	reg = CSR_READ(sc, WMREG_EECD);
   13696 	reg &= ~EECD_EE_REQ;
   13697 	CSR_WRITE(sc, WMREG_EECD, reg);
   13698 
   13699 	return;
   13700 }
   13701 
   13702 /*
   13703  * Get hardware semaphore.
   13704  * Same as e1000_get_hw_semaphore_generic()
   13705  */
   13706 static int
   13707 wm_get_swsm_semaphore(struct wm_softc *sc)
   13708 {
   13709 	int32_t timeout;
   13710 	uint32_t swsm;
   13711 
   13712 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13713 		device_xname(sc->sc_dev), __func__));
   13714 	KASSERT(sc->sc_nvm_wordsize > 0);
   13715 
   13716 retry:
   13717 	/* Get the SW semaphore. */
   13718 	timeout = sc->sc_nvm_wordsize + 1;
   13719 	while (timeout) {
   13720 		swsm = CSR_READ(sc, WMREG_SWSM);
   13721 
   13722 		if ((swsm & SWSM_SMBI) == 0)
   13723 			break;
   13724 
   13725 		delay(50);
   13726 		timeout--;
   13727 	}
   13728 
   13729 	if (timeout == 0) {
   13730 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13731 			/*
   13732 			 * In rare circumstances, the SW semaphore may already
   13733 			 * be held unintentionally. Clear the semaphore once
   13734 			 * before giving up.
   13735 			 */
   13736 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13737 			wm_put_swsm_semaphore(sc);
   13738 			goto retry;
   13739 		}
   13740 		aprint_error_dev(sc->sc_dev,
   13741 		    "could not acquire SWSM SMBI\n");
   13742 		return 1;
   13743 	}
   13744 
   13745 	/* Get the FW semaphore. */
   13746 	timeout = sc->sc_nvm_wordsize + 1;
   13747 	while (timeout) {
   13748 		swsm = CSR_READ(sc, WMREG_SWSM);
   13749 		swsm |= SWSM_SWESMBI;
   13750 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13751 		/* If we managed to set the bit we got the semaphore. */
   13752 		swsm = CSR_READ(sc, WMREG_SWSM);
   13753 		if (swsm & SWSM_SWESMBI)
   13754 			break;
   13755 
   13756 		delay(50);
   13757 		timeout--;
   13758 	}
   13759 
   13760 	if (timeout == 0) {
   13761 		aprint_error_dev(sc->sc_dev,
   13762 		    "could not acquire SWSM SWESMBI\n");
   13763 		/* Release semaphores */
   13764 		wm_put_swsm_semaphore(sc);
   13765 		return 1;
   13766 	}
   13767 	return 0;
   13768 }
   13769 
   13770 /*
   13771  * Put hardware semaphore.
   13772  * Same as e1000_put_hw_semaphore_generic()
   13773  */
   13774 static void
   13775 wm_put_swsm_semaphore(struct wm_softc *sc)
   13776 {
   13777 	uint32_t swsm;
   13778 
   13779 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13780 		device_xname(sc->sc_dev), __func__));
   13781 
   13782 	swsm = CSR_READ(sc, WMREG_SWSM);
   13783 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13784 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13785 }
   13786 
   13787 /*
   13788  * Get SW/FW semaphore.
   13789  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13790  */
   13791 static int
   13792 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13793 {
   13794 	uint32_t swfw_sync;
   13795 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13796 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13797 	int timeout;
   13798 
   13799 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13800 		device_xname(sc->sc_dev), __func__));
   13801 
   13802 	if (sc->sc_type == WM_T_80003)
   13803 		timeout = 50;
   13804 	else
   13805 		timeout = 200;
   13806 
   13807 	while (timeout) {
   13808 		if (wm_get_swsm_semaphore(sc)) {
   13809 			aprint_error_dev(sc->sc_dev,
   13810 			    "%s: failed to get semaphore\n",
   13811 			    __func__);
   13812 			return 1;
   13813 		}
   13814 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13815 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13816 			swfw_sync |= swmask;
   13817 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13818 			wm_put_swsm_semaphore(sc);
   13819 			return 0;
   13820 		}
   13821 		wm_put_swsm_semaphore(sc);
   13822 		delay(5000);
   13823 		timeout--;
   13824 	}
   13825 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13826 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13827 	return 1;
   13828 }
   13829 
   13830 static void
   13831 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13832 {
   13833 	uint32_t swfw_sync;
   13834 
   13835 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13836 		device_xname(sc->sc_dev), __func__));
   13837 
   13838 	while (wm_get_swsm_semaphore(sc) != 0)
   13839 		continue;
   13840 
   13841 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13842 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13843 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13844 
   13845 	wm_put_swsm_semaphore(sc);
   13846 }
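
/*
 * Editor's note: a minimal usage sketch.  Every access to a resource
 * shared with firmware is bracketed by the get/put pair above, e.g.:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return 1;	(could not lock the resource)
 *	... access the shared EEPROM/NVM registers ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */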
   13847 
   13848 static int
   13849 wm_get_nvm_80003(struct wm_softc *sc)
   13850 {
   13851 	int rv;
   13852 
   13853 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13854 		device_xname(sc->sc_dev), __func__));
   13855 
   13856 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13857 		aprint_error_dev(sc->sc_dev,
   13858 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13859 		return rv;
   13860 	}
   13861 
   13862 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13863 	    && (rv = wm_get_eecd(sc)) != 0) {
   13864 		aprint_error_dev(sc->sc_dev,
   13865 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13866 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13867 		return rv;
   13868 	}
   13869 
   13870 	return 0;
   13871 }
   13872 
   13873 static void
   13874 wm_put_nvm_80003(struct wm_softc *sc)
   13875 {
   13876 
   13877 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13878 		device_xname(sc->sc_dev), __func__));
   13879 
   13880 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13881 		wm_put_eecd(sc);
   13882 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13883 }
   13884 
   13885 static int
   13886 wm_get_nvm_82571(struct wm_softc *sc)
   13887 {
   13888 	int rv;
   13889 
   13890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13891 		device_xname(sc->sc_dev), __func__));
   13892 
   13893 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13894 		return rv;
   13895 
   13896 	switch (sc->sc_type) {
   13897 	case WM_T_82573:
   13898 		break;
   13899 	default:
   13900 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13901 			rv = wm_get_eecd(sc);
   13902 		break;
   13903 	}
   13904 
   13905 	if (rv != 0) {
   13906 		aprint_error_dev(sc->sc_dev,
   13907 		    "%s: failed to get semaphore\n",
   13908 		    __func__);
   13909 		wm_put_swsm_semaphore(sc);
   13910 	}
   13911 
   13912 	return rv;
   13913 }
   13914 
   13915 static void
   13916 wm_put_nvm_82571(struct wm_softc *sc)
   13917 {
   13918 
   13919 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13920 		device_xname(sc->sc_dev), __func__));
   13921 
   13922 	switch (sc->sc_type) {
   13923 	case WM_T_82573:
   13924 		break;
   13925 	default:
   13926 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13927 			wm_put_eecd(sc);
   13928 		break;
   13929 	}
   13930 
   13931 	wm_put_swsm_semaphore(sc);
   13932 }
   13933 
   13934 static int
   13935 wm_get_phy_82575(struct wm_softc *sc)
   13936 {
   13937 
   13938 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13939 		device_xname(sc->sc_dev), __func__));
   13940 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13941 }
   13942 
   13943 static void
   13944 wm_put_phy_82575(struct wm_softc *sc)
   13945 {
   13946 
   13947 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13948 		device_xname(sc->sc_dev), __func__));
    13949 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13950 }
   13951 
   13952 static int
   13953 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13954 {
   13955 	uint32_t ext_ctrl;
    13956 	int timeout;
   13957 
   13958 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13959 		device_xname(sc->sc_dev), __func__));
   13960 
   13961 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13962 	for (timeout = 0; timeout < 200; timeout++) {
   13963 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13964 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13965 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13966 
   13967 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13968 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13969 			return 0;
   13970 		delay(5000);
   13971 	}
   13972 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13973 	    device_xname(sc->sc_dev), ext_ctrl);
   13974 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13975 	return 1;
   13976 }
   13977 
   13978 static void
   13979 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13980 {
   13981 	uint32_t ext_ctrl;
   13982 
   13983 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13984 		device_xname(sc->sc_dev), __func__));
   13985 
   13986 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13987 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13988 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13989 
   13990 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13991 }
   13992 
   13993 static int
   13994 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13995 {
   13996 	uint32_t ext_ctrl;
   13997 	int timeout;
   13998 
   13999 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14000 		device_xname(sc->sc_dev), __func__));
   14001 	mutex_enter(sc->sc_ich_phymtx);
   14002 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14003 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14004 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14005 			break;
   14006 		delay(1000);
   14007 	}
   14008 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14009 		printf("%s: SW has already locked the resource\n",
   14010 		    device_xname(sc->sc_dev));
   14011 		goto out;
   14012 	}
   14013 
   14014 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14015 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14016 	for (timeout = 0; timeout < 1000; timeout++) {
   14017 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14018 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14019 			break;
   14020 		delay(1000);
   14021 	}
   14022 	if (timeout >= 1000) {
   14023 		printf("%s: failed to acquire semaphore\n",
   14024 		    device_xname(sc->sc_dev));
   14025 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14026 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14027 		goto out;
   14028 	}
   14029 	return 0;
   14030 
   14031 out:
   14032 	mutex_exit(sc->sc_ich_phymtx);
   14033 	return 1;
   14034 }
   14035 
   14036 static void
   14037 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14038 {
   14039 	uint32_t ext_ctrl;
   14040 
   14041 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14042 		device_xname(sc->sc_dev), __func__));
   14043 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14044 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14045 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14046 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14047 	} else {
   14048 		printf("%s: Semaphore unexpectedly released\n",
   14049 		    device_xname(sc->sc_dev));
   14050 	}
   14051 
   14052 	mutex_exit(sc->sc_ich_phymtx);
   14053 }
   14054 
   14055 static int
   14056 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14057 {
   14058 
   14059 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14060 		device_xname(sc->sc_dev), __func__));
   14061 	mutex_enter(sc->sc_ich_nvmmtx);
   14062 
   14063 	return 0;
   14064 }
   14065 
   14066 static void
   14067 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14068 {
   14069 
   14070 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14071 		device_xname(sc->sc_dev), __func__));
   14072 	mutex_exit(sc->sc_ich_nvmmtx);
   14073 }
   14074 
   14075 static int
   14076 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14077 {
   14078 	int i = 0;
   14079 	uint32_t reg;
   14080 
   14081 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14082 		device_xname(sc->sc_dev), __func__));
   14083 
   14084 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14085 	do {
   14086 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14087 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14088 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14089 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14090 			break;
   14091 		delay(2*1000);
   14092 		i++;
   14093 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14094 
   14095 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14096 		wm_put_hw_semaphore_82573(sc);
   14097 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14098 		    device_xname(sc->sc_dev));
   14099 		return -1;
   14100 	}
   14101 
   14102 	return 0;
   14103 }
   14104 
   14105 static void
   14106 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14107 {
   14108 	uint32_t reg;
   14109 
   14110 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14111 		device_xname(sc->sc_dev), __func__));
   14112 
   14113 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14114 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14115 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14116 }
   14117 
   14118 /*
   14119  * Management mode and power management related subroutines.
   14120  * BMC, AMT, suspend/resume and EEE.
   14121  */
   14122 
   14123 #ifdef WM_WOL
   14124 static int
   14125 wm_check_mng_mode(struct wm_softc *sc)
   14126 {
   14127 	int rv;
   14128 
   14129 	switch (sc->sc_type) {
   14130 	case WM_T_ICH8:
   14131 	case WM_T_ICH9:
   14132 	case WM_T_ICH10:
   14133 	case WM_T_PCH:
   14134 	case WM_T_PCH2:
   14135 	case WM_T_PCH_LPT:
   14136 	case WM_T_PCH_SPT:
   14137 	case WM_T_PCH_CNP:
   14138 		rv = wm_check_mng_mode_ich8lan(sc);
   14139 		break;
   14140 	case WM_T_82574:
   14141 	case WM_T_82583:
   14142 		rv = wm_check_mng_mode_82574(sc);
   14143 		break;
   14144 	case WM_T_82571:
   14145 	case WM_T_82572:
   14146 	case WM_T_82573:
   14147 	case WM_T_80003:
   14148 		rv = wm_check_mng_mode_generic(sc);
   14149 		break;
   14150 	default:
    14151 		/* Nothing to do */
   14152 		rv = 0;
   14153 		break;
   14154 	}
   14155 
   14156 	return rv;
   14157 }
   14158 
   14159 static int
   14160 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14161 {
   14162 	uint32_t fwsm;
   14163 
   14164 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14165 
   14166 	if (((fwsm & FWSM_FW_VALID) != 0)
   14167 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14168 		return 1;
   14169 
   14170 	return 0;
   14171 }
   14172 
   14173 static int
   14174 wm_check_mng_mode_82574(struct wm_softc *sc)
   14175 {
   14176 	uint16_t data;
   14177 
   14178 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14179 
   14180 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14181 		return 1;
   14182 
   14183 	return 0;
   14184 }
   14185 
   14186 static int
   14187 wm_check_mng_mode_generic(struct wm_softc *sc)
   14188 {
   14189 	uint32_t fwsm;
   14190 
   14191 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14192 
   14193 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14194 		return 1;
   14195 
   14196 	return 0;
   14197 }
   14198 #endif /* WM_WOL */
   14199 
   14200 static int
   14201 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14202 {
   14203 	uint32_t manc, fwsm, factps;
   14204 
   14205 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14206 		return 0;
   14207 
   14208 	manc = CSR_READ(sc, WMREG_MANC);
   14209 
   14210 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14211 		device_xname(sc->sc_dev), manc));
   14212 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14213 		return 0;
   14214 
   14215 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14216 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14217 		factps = CSR_READ(sc, WMREG_FACTPS);
   14218 		if (((factps & FACTPS_MNGCG) == 0)
   14219 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14220 			return 1;
    14221 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14222 		uint16_t data;
   14223 
   14224 		factps = CSR_READ(sc, WMREG_FACTPS);
   14225 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14226 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14227 			device_xname(sc->sc_dev), factps, data));
   14228 		if (((factps & FACTPS_MNGCG) == 0)
   14229 		    && ((data & NVM_CFG2_MNGM_MASK)
   14230 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14231 			return 1;
   14232 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14233 	    && ((manc & MANC_ASF_EN) == 0))
   14234 		return 1;
   14235 
   14236 	return 0;
   14237 }
   14238 
   14239 static bool
   14240 wm_phy_resetisblocked(struct wm_softc *sc)
   14241 {
   14242 	bool blocked = false;
   14243 	uint32_t reg;
   14244 	int i = 0;
   14245 
   14246 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14247 		device_xname(sc->sc_dev), __func__));
   14248 
   14249 	switch (sc->sc_type) {
   14250 	case WM_T_ICH8:
   14251 	case WM_T_ICH9:
   14252 	case WM_T_ICH10:
   14253 	case WM_T_PCH:
   14254 	case WM_T_PCH2:
   14255 	case WM_T_PCH_LPT:
   14256 	case WM_T_PCH_SPT:
   14257 	case WM_T_PCH_CNP:
   14258 		do {
   14259 			reg = CSR_READ(sc, WMREG_FWSM);
   14260 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14261 				blocked = true;
   14262 				delay(10*1000);
   14263 				continue;
   14264 			}
   14265 			blocked = false;
   14266 		} while (blocked && (i++ < 30));
    14267 		return blocked;
   14269 	case WM_T_82571:
   14270 	case WM_T_82572:
   14271 	case WM_T_82573:
   14272 	case WM_T_82574:
   14273 	case WM_T_82583:
   14274 	case WM_T_80003:
   14275 		reg = CSR_READ(sc, WMREG_MANC);
    14276 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   14281 	default:
   14282 		/* No problem */
   14283 		break;
   14284 	}
   14285 
   14286 	return false;
   14287 }
   14288 
   14289 static void
   14290 wm_get_hw_control(struct wm_softc *sc)
   14291 {
   14292 	uint32_t reg;
   14293 
   14294 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14295 		device_xname(sc->sc_dev), __func__));
   14296 
   14297 	if (sc->sc_type == WM_T_82573) {
   14298 		reg = CSR_READ(sc, WMREG_SWSM);
   14299 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14300 	} else if (sc->sc_type >= WM_T_82571) {
   14301 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14302 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14303 	}
   14304 }
   14305 
   14306 static void
   14307 wm_release_hw_control(struct wm_softc *sc)
   14308 {
   14309 	uint32_t reg;
   14310 
   14311 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14312 		device_xname(sc->sc_dev), __func__));
   14313 
   14314 	if (sc->sc_type == WM_T_82573) {
   14315 		reg = CSR_READ(sc, WMREG_SWSM);
   14316 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14317 	} else if (sc->sc_type >= WM_T_82571) {
   14318 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14319 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14320 	}
   14321 }
   14322 
   14323 static void
   14324 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14325 {
   14326 	uint32_t reg;
   14327 
   14328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14329 		device_xname(sc->sc_dev), __func__));
   14330 
   14331 	if (sc->sc_type < WM_T_PCH2)
   14332 		return;
   14333 
   14334 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14335 
   14336 	if (gate)
   14337 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14338 	else
   14339 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14340 
   14341 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14342 }
   14343 
   14344 static int
   14345 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14346 {
   14347 	uint32_t fwsm, reg;
   14348 	int rv = 0;
   14349 
   14350 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14351 		device_xname(sc->sc_dev), __func__));
   14352 
   14353 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14354 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14355 
   14356 	/* Disable ULP */
   14357 	wm_ulp_disable(sc);
   14358 
   14359 	/* Acquire PHY semaphore */
   14360 	rv = sc->phy.acquire(sc);
   14361 	if (rv != 0) {
   14362 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14363 		device_xname(sc->sc_dev), __func__));
   14364 		return -1;
   14365 	}
   14366 
   14367 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14368 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14369 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14370 	 */
   14371 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14372 	switch (sc->sc_type) {
   14373 	case WM_T_PCH_LPT:
   14374 	case WM_T_PCH_SPT:
   14375 	case WM_T_PCH_CNP:
   14376 		if (wm_phy_is_accessible_pchlan(sc))
   14377 			break;
   14378 
   14379 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14380 		 * forcing MAC to SMBus mode first.
   14381 		 */
   14382 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14383 		reg |= CTRL_EXT_FORCE_SMBUS;
   14384 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14385 #if 0
   14386 		/* XXX Isn't this required??? */
   14387 		CSR_WRITE_FLUSH(sc);
   14388 #endif
   14389 		/* Wait 50 milliseconds for MAC to finish any retries
   14390 		 * that it might be trying to perform from previous
   14391 		 * attempts to acknowledge any phy read requests.
   14392 		 */
   14393 		delay(50 * 1000);
   14394 		/* FALLTHROUGH */
   14395 	case WM_T_PCH2:
   14396 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14397 			break;
   14398 		/* FALLTHROUGH */
   14399 	case WM_T_PCH:
   14400 		if (sc->sc_type == WM_T_PCH)
   14401 			if ((fwsm & FWSM_FW_VALID) != 0)
   14402 				break;
   14403 
   14404 		if (wm_phy_resetisblocked(sc) == true) {
   14405 			printf("XXX reset is blocked(3)\n");
   14406 			break;
   14407 		}
   14408 
   14409 		/* Toggle LANPHYPC Value bit */
   14410 		wm_toggle_lanphypc_pch_lpt(sc);
   14411 
   14412 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14413 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14414 				break;
   14415 
   14416 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14417 			 * so ensure that the MAC is also out of SMBus mode
   14418 			 */
   14419 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14420 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14421 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14422 
   14423 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14424 				break;
   14425 			rv = -1;
   14426 		}
   14427 		break;
   14428 	default:
   14429 		break;
   14430 	}
   14431 
   14432 	/* Release semaphore */
   14433 	sc->phy.release(sc);
   14434 
   14435 	if (rv == 0) {
   14436 		/* Check to see if able to reset PHY.  Print error if not */
   14437 		if (wm_phy_resetisblocked(sc)) {
   14438 			printf("XXX reset is blocked(4)\n");
   14439 			goto out;
   14440 		}
   14441 
   14442 		/* Reset the PHY before any access to it.  Doing so, ensures
   14443 		 * that the PHY is in a known good state before we read/write
   14444 		 * PHY registers.  The generic reset is sufficient here,
   14445 		 * because we haven't determined the PHY type yet.
   14446 		 */
   14447 		if (wm_reset_phy(sc) != 0)
   14448 			goto out;
   14449 
   14450 		/* On a successful reset, possibly need to wait for the PHY
   14451 		 * to quiesce to an accessible state before returning control
   14452 		 * to the calling function.  If the PHY does not quiesce, then
    14453 		 * the PHY is in a blocked state and all we can do here is
    14454 		 * report it.
   14455 		 */
   14456 		if (wm_phy_resetisblocked(sc))
    14457 			printf("XXX reset is blocked(5)\n");
   14458 	}
   14459 
   14460 out:
   14461 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14462 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14463 		delay(10*1000);
   14464 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14465 	}
   14466 
    14467 	return rv;
   14468 }
   14469 
   14470 static void
   14471 wm_init_manageability(struct wm_softc *sc)
   14472 {
   14473 
   14474 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14475 		device_xname(sc->sc_dev), __func__));
   14476 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14477 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14478 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14479 
   14480 		/* Disable hardware interception of ARP */
   14481 		manc &= ~MANC_ARP_EN;
   14482 
   14483 		/* Enable receiving management packets to the host */
   14484 		if (sc->sc_type >= WM_T_82571) {
   14485 			manc |= MANC_EN_MNG2HOST;
   14486 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14487 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14488 		}
   14489 
   14490 		CSR_WRITE(sc, WMREG_MANC, manc);
   14491 	}
   14492 }
   14493 
   14494 static void
   14495 wm_release_manageability(struct wm_softc *sc)
   14496 {
   14497 
   14498 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14499 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14500 
   14501 		manc |= MANC_ARP_EN;
   14502 		if (sc->sc_type >= WM_T_82571)
   14503 			manc &= ~MANC_EN_MNG2HOST;
   14504 
   14505 		CSR_WRITE(sc, WMREG_MANC, manc);
   14506 	}
   14507 }
   14508 
   14509 static void
   14510 wm_get_wakeup(struct wm_softc *sc)
   14511 {
   14512 
   14513 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14514 	switch (sc->sc_type) {
   14515 	case WM_T_82573:
   14516 	case WM_T_82583:
   14517 		sc->sc_flags |= WM_F_HAS_AMT;
   14518 		/* FALLTHROUGH */
   14519 	case WM_T_80003:
   14520 	case WM_T_82575:
   14521 	case WM_T_82576:
   14522 	case WM_T_82580:
   14523 	case WM_T_I350:
   14524 	case WM_T_I354:
   14525 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14526 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14527 		/* FALLTHROUGH */
   14528 	case WM_T_82541:
   14529 	case WM_T_82541_2:
   14530 	case WM_T_82547:
   14531 	case WM_T_82547_2:
   14532 	case WM_T_82571:
   14533 	case WM_T_82572:
   14534 	case WM_T_82574:
   14535 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14536 		break;
   14537 	case WM_T_ICH8:
   14538 	case WM_T_ICH9:
   14539 	case WM_T_ICH10:
   14540 	case WM_T_PCH:
   14541 	case WM_T_PCH2:
   14542 	case WM_T_PCH_LPT:
   14543 	case WM_T_PCH_SPT:
   14544 	case WM_T_PCH_CNP:
   14545 		sc->sc_flags |= WM_F_HAS_AMT;
   14546 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14547 		break;
   14548 	default:
   14549 		break;
   14550 	}
   14551 
   14552 	/* 1: HAS_MANAGE */
   14553 	if (wm_enable_mng_pass_thru(sc) != 0)
   14554 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14555 
   14556 	/*
    14557 	 * Note that the WOL flag is set after the EEPROM reset code
    14558 	 * has run.
   14559 	 */
   14560 }
   14561 
   14562 /*
   14563  * Unconfigure Ultra Low Power mode.
   14564  * Only for I217 and newer (see below).
   14565  */
   14566 static int
   14567 wm_ulp_disable(struct wm_softc *sc)
   14568 {
   14569 	uint32_t reg;
   14570 	uint16_t phyreg;
   14571 	int i = 0, rv = 0;
   14572 
   14573 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14574 		device_xname(sc->sc_dev), __func__));
   14575 	/* Exclude old devices */
   14576 	if ((sc->sc_type < WM_T_PCH_LPT)
   14577 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14578 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14579 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14580 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14581 		return 0;
   14582 
   14583 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14584 		/* Request ME un-configure ULP mode in the PHY */
   14585 		reg = CSR_READ(sc, WMREG_H2ME);
   14586 		reg &= ~H2ME_ULP;
   14587 		reg |= H2ME_ENFORCE_SETTINGS;
   14588 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14589 
   14590 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14591 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14592 			if (i++ == 30) {
   14593 				printf("%s timed out\n", __func__);
   14594 				return -1;
   14595 			}
   14596 			delay(10 * 1000);
   14597 		}
   14598 		reg = CSR_READ(sc, WMREG_H2ME);
   14599 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14600 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14601 
   14602 		return 0;
   14603 	}
   14604 
   14605 	/* Acquire semaphore */
   14606 	rv = sc->phy.acquire(sc);
   14607 	if (rv != 0) {
   14608 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14609 		device_xname(sc->sc_dev), __func__));
   14610 		return -1;
   14611 	}
   14612 
   14613 	/* Toggle LANPHYPC */
   14614 	wm_toggle_lanphypc_pch_lpt(sc);
   14615 
   14616 	/* Unforce SMBus mode in PHY */
   14617 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14618 	if (rv != 0) {
   14619 		uint32_t reg2;
   14620 
   14621 		printf("%s: Force SMBus first.\n", __func__);
   14622 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14623 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14624 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14625 		delay(50 * 1000);
   14626 
   14627 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14628 		    &phyreg);
   14629 		if (rv != 0)
   14630 			goto release;
   14631 	}
   14632 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14633 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14634 
   14635 	/* Unforce SMBus mode in MAC */
   14636 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14637 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14638 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14639 
   14640 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14641 	if (rv != 0)
   14642 		goto release;
   14643 	phyreg |= HV_PM_CTRL_K1_ENA;
   14644 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14645 
   14646 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14647 		&phyreg);
   14648 	if (rv != 0)
   14649 		goto release;
   14650 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14651 	    | I218_ULP_CONFIG1_STICKY_ULP
   14652 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14653 	    | I218_ULP_CONFIG1_WOL_HOST
   14654 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14655 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14656 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14657 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14658 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14659 	phyreg |= I218_ULP_CONFIG1_START;
   14660 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14661 
   14662 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14663 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14664 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14665 
   14666 release:
   14667 	/* Release semaphore */
   14668 	sc->phy.release(sc);
   14669 	wm_gmii_reset(sc);
   14670 	delay(50 * 1000);
   14671 
   14672 	return rv;
   14673 }
   14674 
   14675 /* WOL in the newer chipset interfaces (pchlan) */
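          /*
           * The MAC's wakeup context is mirrored into the PHY here: the
           * receive address registers, the multicast table and the Rx control
           * settings are copied into the BM wakeup register set so the PHY
           * can recognize wake packets and assert PME while the MAC is
           * powered down.
           */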
   14676 static int
   14677 wm_enable_phy_wakeup(struct wm_softc *sc)
   14678 {
   14679 	device_t dev = sc->sc_dev;
   14680 	uint32_t mreg, moff;
   14681 	uint16_t wuce, wuc, wufc, preg;
   14682 	int i, rv;
   14683 
   14684 	KASSERT(sc->sc_type >= WM_T_PCH);
   14685 
   14686 	/* Copy MAC RARs to PHY RARs */
   14687 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14688 
   14689 	/* Activate PHY wakeup */
   14690 	rv = sc->phy.acquire(sc);
   14691 	if (rv != 0) {
   14692 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14693 		    __func__);
   14694 		return rv;
   14695 	}
   14696 
   14697 	/*
   14698 	 * Enable access to PHY wakeup registers.
   14699 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14700 	 */
   14701 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14702 	if (rv != 0) {
   14703 		device_printf(dev,
   14704 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14705 		goto release;
   14706 	}
   14707 
   14708 	/* Copy MAC MTA to PHY MTA */
   14709 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14710 		uint16_t lo, hi;
   14711 
   14712 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14713 		lo = (uint16_t)(mreg & 0xffff);
   14714 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14715 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14716 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14717 	}
   14718 
   14719 	/* Configure PHY Rx Control register */
   14720 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14721 	mreg = CSR_READ(sc, WMREG_RCTL);
   14722 	if (mreg & RCTL_UPE)
   14723 		preg |= BM_RCTL_UPE;
   14724 	if (mreg & RCTL_MPE)
   14725 		preg |= BM_RCTL_MPE;
   14726 	preg &= ~(BM_RCTL_MO_MASK);
   14727 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14728 	if (moff != 0)
   14729 		preg |= moff << BM_RCTL_MO_SHIFT;
   14730 	if (mreg & RCTL_BAM)
   14731 		preg |= BM_RCTL_BAM;
   14732 	if (mreg & RCTL_PMCF)
   14733 		preg |= BM_RCTL_PMCF;
   14734 	mreg = CSR_READ(sc, WMREG_CTRL);
   14735 	if (mreg & CTRL_RFCE)
   14736 		preg |= BM_RCTL_RFCE;
   14737 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14738 
   14739 	wuc = WUC_APME | WUC_PME_EN;
   14740 	wufc = WUFC_MAG;
   14741 	/* Enable PHY wakeup in MAC register */
   14742 	CSR_WRITE(sc, WMREG_WUC,
   14743 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14744 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14745 
   14746 	/* Configure and enable PHY wakeup in PHY registers */
   14747 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14748 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14749 
   14750 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14751 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14752 
   14753 release:
   14754 	sc->phy.release(sc);
   14755 
    14756 	return rv;
   14757 }
   14758 
   14759 /* Power down workaround on D3 */
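          /*
           * Rough flow of the workaround below: gigabit link is disabled
           * first, then the IGP3 voltage regulator is put into shutdown mode.
           * The setting is read back; if it did not stick, the PHY is reset
           * and the sequence is retried once.
           */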
   14760 static void
   14761 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14762 {
   14763 	uint32_t reg;
   14764 	uint16_t phyreg;
   14765 	int i;
   14766 
   14767 	for (i = 0; i < 2; i++) {
   14768 		/* Disable link */
   14769 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14770 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14771 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14772 
   14773 		/*
   14774 		 * Call gig speed drop workaround on Gig disable before
   14775 		 * accessing any PHY registers
   14776 		 */
   14777 		if (sc->sc_type == WM_T_ICH8)
   14778 			wm_gig_downshift_workaround_ich8lan(sc);
   14779 
   14780 		/* Write VR power-down enable */
   14781 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14782 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14783 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14784 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14785 
   14786 		/* Read it back and test */
   14787 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14788 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14789 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14790 			break;
   14791 
   14792 		/* Issue PHY reset and repeat at most one more time */
   14793 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14794 	}
   14795 }
   14796 
   14797 /*
   14798  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14799  *  @sc: pointer to the HW structure
   14800  *
   14801  *  During S0 to Sx transition, it is possible the link remains at gig
   14802  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14803  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14804  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14805  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14806  *  needs to be written.
   14807  *  Parts that support (and are linked to a partner which support) EEE in
   14808  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14809  *  than 10Mbps w/o EEE.
   14810  */
   14811 static void
   14812 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14813 {
   14814 	device_t dev = sc->sc_dev;
   14815 	struct ethercom *ec = &sc->sc_ethercom;
   14816 	uint32_t phy_ctrl;
   14817 	int rv;
   14818 
   14819 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14820 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14821 
   14822 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14823 
   14824 	if (sc->sc_phytype == WMPHY_I217) {
   14825 		uint16_t devid = sc->sc_pcidevid;
   14826 
   14827 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14828 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14829 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14830 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14831 		    (sc->sc_type >= WM_T_PCH_SPT))
   14832 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14833 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14834 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14835 
   14836 		if (sc->phy.acquire(sc) != 0)
   14837 			goto out;
   14838 
   14839 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14840 			uint16_t eee_advert;
   14841 
   14842 			rv = wm_read_emi_reg_locked(dev,
   14843 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14844 			if (rv)
   14845 				goto release;
   14846 
   14847 			/*
   14848 			 * Disable LPLU if both link partners support 100BaseT
   14849 			 * EEE and 100Full is advertised on both ends of the
   14850 			 * link, and enable Auto Enable LPI since there will
   14851 			 * be no driver to enable LPI while in Sx.
   14852 			 */
   14853 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14854 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14855 				uint16_t anar, phy_reg;
   14856 
   14857 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14858 				    &anar);
   14859 				if (anar & ANAR_TX_FD) {
   14860 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14861 					    PHY_CTRL_NOND0A_LPLU);
   14862 
   14863 					/* Set Auto Enable LPI after link up */
   14864 					sc->phy.readreg_locked(dev, 2,
   14865 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14866 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14867 					sc->phy.writereg_locked(dev, 2,
   14868 					    I217_LPI_GPIO_CTRL, phy_reg);
   14869 				}
   14870 			}
   14871 		}
   14872 
   14873 		/*
   14874 		 * For i217 Intel Rapid Start Technology support,
   14875 		 * when the system is going into Sx and no manageability engine
   14876 		 * is present, the driver must configure proxy to reset only on
    14877 		 * power good.  LPI (Low Power Idle) state must also reset only
   14878 		 * on power good, as well as the MTA (Multicast table array).
   14879 		 * The SMBus release must also be disabled on LCD reset.
   14880 		 */
   14881 
   14882 		/*
   14883 		 * Enable MTA to reset for Intel Rapid Start Technology
   14884 		 * Support
   14885 		 */
   14886 
   14887 release:
   14888 		sc->phy.release(sc);
   14889 	}
   14890 out:
   14891 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14892 
   14893 	if (sc->sc_type == WM_T_ICH8)
   14894 		wm_gig_downshift_workaround_ich8lan(sc);
   14895 
   14896 	if (sc->sc_type >= WM_T_PCH) {
   14897 		wm_oem_bits_config_ich8lan(sc, false);
   14898 
   14899 		/* Reset PHY to activate OEM bits on 82577/8 */
   14900 		if (sc->sc_type == WM_T_PCH)
   14901 			wm_reset_phy(sc);
   14902 
   14903 		if (sc->phy.acquire(sc) != 0)
   14904 			return;
   14905 		wm_write_smbus_addr(sc);
   14906 		sc->phy.release(sc);
   14907 	}
   14908 }
   14909 
   14910 /*
   14911  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14912  *  @sc: pointer to the HW structure
   14913  *
   14914  *  During Sx to S0 transitions on non-managed devices or managed devices
   14915  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14916  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14917  *  the PHY.
   14918  *  On i217, setup Intel Rapid Start Technology.
   14919  */
   14920 static int
   14921 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14922 {
   14923 	device_t dev = sc->sc_dev;
   14924 	int rv;
   14925 
   14926 	if (sc->sc_type < WM_T_PCH2)
   14927 		return 0;
   14928 
   14929 	rv = wm_init_phy_workarounds_pchlan(sc);
   14930 	if (rv != 0)
   14931 		return -1;
   14932 
    14933 	/*
    14934 	 * For i217 Intel Rapid Start Technology support: when the system
    14935 	 * is transitioning from Sx and no manageability engine is present,
    14936 	 * configure SMBus to restore on reset, disable proxy, and enable
    14937 	 * the reset on MTA (Multicast table array).
          	 */
   14938 	if (sc->sc_phytype == WMPHY_I217) {
   14939 		uint16_t phy_reg;
   14940 
   14941 		if (sc->phy.acquire(sc) != 0)
   14942 			return -1;
   14943 
   14944 		/* Clear Auto Enable LPI after link up */
   14945 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14946 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14947 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14948 
   14949 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
    14950 			/*
    14951 			 * Restore clear on SMB if no manageability engine
    14952 			 * is present.
          			 */
   14953 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14954 			    &phy_reg);
   14955 			if (rv != 0)
   14956 				goto release;
   14957 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14958 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14959 
   14960 			/* Disable Proxy */
   14961 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14962 		}
   14963 		/* Enable reset on MTA */
    14964 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14965 		if (rv != 0)
   14966 			goto release;
   14967 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14968 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14969 
   14970 release:
   14971 		sc->phy.release(sc);
   14972 		return rv;
   14973 	}
   14974 
   14975 	return 0;
   14976 }
   14977 
   14978 static void
   14979 wm_enable_wakeup(struct wm_softc *sc)
   14980 {
   14981 	uint32_t reg, pmreg;
   14982 	pcireg_t pmode;
   14983 	int rv = 0;
   14984 
   14985 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14986 		device_xname(sc->sc_dev), __func__));
   14987 
   14988 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14989 	    &pmreg, NULL) == 0)
   14990 		return;
   14991 
   14992 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14993 		goto pme;
   14994 
   14995 	/* Advertise the wakeup capability */
   14996 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14997 	    | CTRL_SWDPIN(3));
   14998 
   14999 	/* Keep the laser running on fiber adapters */
   15000 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15001 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15002 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15003 		reg |= CTRL_EXT_SWDPIN(3);
   15004 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15005 	}
   15006 
   15007 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15008 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15009 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15010 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15011 		wm_suspend_workarounds_ich8lan(sc);
   15012 
   15013 #if 0	/* For the multicast packet */
   15014 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15015 	reg |= WUFC_MC;
   15016 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15017 #endif
   15018 
   15019 	if (sc->sc_type >= WM_T_PCH) {
   15020 		rv = wm_enable_phy_wakeup(sc);
   15021 		if (rv != 0)
   15022 			goto pme;
   15023 	} else {
   15024 		/* Enable wakeup by the MAC */
   15025 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15026 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15027 	}
   15028 
   15029 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15030 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15031 		|| (sc->sc_type == WM_T_PCH2))
   15032 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15033 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15034 
   15035 pme:
   15036 	/* Request PME */
   15037 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15038 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15039 		/* For WOL */
   15040 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15041 	} else {
   15042 		/* Disable WOL */
   15043 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15044 	}
   15045 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15046 }
   15047 
   15048 /* Disable ASPM L0s and/or L1 for workaround */
   15049 static void
   15050 wm_disable_aspm(struct wm_softc *sc)
   15051 {
   15052 	pcireg_t reg, mask = 0;
    15053 	const char *str = "";
   15054 
   15055 	/*
    15056 	 * Only for PCIe devices which have the PCIe capability in PCI
    15057 	 * config space.
   15058 	 */
   15059 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15060 		return;
   15061 
   15062 	switch (sc->sc_type) {
   15063 	case WM_T_82571:
   15064 	case WM_T_82572:
   15065 		/*
   15066 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15067 		 * State Power management L1 State (ASPM L1).
   15068 		 */
   15069 		mask = PCIE_LCSR_ASPM_L1;
   15070 		str = "L1 is";
   15071 		break;
   15072 	case WM_T_82573:
   15073 	case WM_T_82574:
   15074 	case WM_T_82583:
   15075 		/*
   15076 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15077 		 *
    15078 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15079 		 * some chipsets.  The documents for the 82574 and 82583 say
    15080 		 * that disabling L0s with some specific chipsets is
    15081 		 * sufficient, but we follow what the Intel em driver does.
   15082 		 *
   15083 		 * References:
   15084 		 * Errata 8 of the Specification Update of i82573.
   15085 		 * Errata 20 of the Specification Update of i82574.
   15086 		 * Errata 9 of the Specification Update of i82583.
   15087 		 */
   15088 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15089 		str = "L0s and L1 are";
   15090 		break;
   15091 	default:
   15092 		return;
   15093 	}
   15094 
   15095 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15096 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15097 	reg &= ~mask;
   15098 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15099 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15100 
   15101 	/* Print only in wm_attach() */
   15102 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15103 		aprint_verbose_dev(sc->sc_dev,
   15104 		    "ASPM %s disabled to workaround the errata.\n", str);
   15105 }
   15106 
   15107 /* LPLU */
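          /*
           * LPLU (Low Power Link Up) lets the PHY negotiate down to the
           * lowest usable speed to save power.  The routine below clears the
           * D0 LPLU setting; where that setting lives varies per generation:
           * a PHY power management register, the PHPM register, the PHY_CTRL
           * register, or the HV OEM bits.
           */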
   15108 
   15109 static void
   15110 wm_lplu_d0_disable(struct wm_softc *sc)
   15111 {
   15112 	struct mii_data *mii = &sc->sc_mii;
   15113 	uint32_t reg;
   15114 	uint16_t phyval;
   15115 
   15116 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15117 		device_xname(sc->sc_dev), __func__));
   15118 
   15119 	if (sc->sc_phytype == WMPHY_IFE)
   15120 		return;
   15121 
   15122 	switch (sc->sc_type) {
   15123 	case WM_T_82571:
   15124 	case WM_T_82572:
   15125 	case WM_T_82573:
   15126 	case WM_T_82575:
   15127 	case WM_T_82576:
   15128 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15129 		phyval &= ~PMR_D0_LPLU;
   15130 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15131 		break;
   15132 	case WM_T_82580:
   15133 	case WM_T_I350:
   15134 	case WM_T_I210:
   15135 	case WM_T_I211:
   15136 		reg = CSR_READ(sc, WMREG_PHPM);
   15137 		reg &= ~PHPM_D0A_LPLU;
   15138 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15139 		break;
   15140 	case WM_T_82574:
   15141 	case WM_T_82583:
   15142 	case WM_T_ICH8:
   15143 	case WM_T_ICH9:
   15144 	case WM_T_ICH10:
   15145 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15146 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15147 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15148 		CSR_WRITE_FLUSH(sc);
   15149 		break;
   15150 	case WM_T_PCH:
   15151 	case WM_T_PCH2:
   15152 	case WM_T_PCH_LPT:
   15153 	case WM_T_PCH_SPT:
   15154 	case WM_T_PCH_CNP:
   15155 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15156 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15157 		if (wm_phy_resetisblocked(sc) == false)
   15158 			phyval |= HV_OEM_BITS_ANEGNOW;
   15159 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15160 		break;
   15161 	default:
   15162 		break;
   15163 	}
   15164 }
   15165 
   15166 /* EEE */
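          /*
           * EEE (IEEE 802.3az Energy Efficient Ethernet) lets both link
           * partners enter a Low Power Idle (LPI) state between packets.
           * I350-class devices configure it through the MAC's IPCNFG/EEER
           * registers, while PCH2 and newer configure it through the PHY.
           */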
   15167 
   15168 static int
   15169 wm_set_eee_i350(struct wm_softc *sc)
   15170 {
   15171 	struct ethercom *ec = &sc->sc_ethercom;
   15172 	uint32_t ipcnfg, eeer;
   15173 	uint32_t ipcnfg_mask
   15174 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15175 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15176 
   15177 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15178 
   15179 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15180 	eeer = CSR_READ(sc, WMREG_EEER);
   15181 
   15182 	/* Enable or disable per user setting */
   15183 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15184 		ipcnfg |= ipcnfg_mask;
   15185 		eeer |= eeer_mask;
   15186 	} else {
   15187 		ipcnfg &= ~ipcnfg_mask;
   15188 		eeer &= ~eeer_mask;
   15189 	}
   15190 
   15191 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15192 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15193 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15194 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15195 
   15196 	return 0;
   15197 }
   15198 
   15199 static int
   15200 wm_set_eee_pchlan(struct wm_softc *sc)
   15201 {
   15202 	device_t dev = sc->sc_dev;
   15203 	struct ethercom *ec = &sc->sc_ethercom;
   15204 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15205 	int rv = 0;
   15206 
   15207 	switch (sc->sc_phytype) {
   15208 	case WMPHY_82579:
   15209 		lpa = I82579_EEE_LP_ABILITY;
   15210 		pcs_status = I82579_EEE_PCS_STATUS;
   15211 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15212 		break;
   15213 	case WMPHY_I217:
   15214 		lpa = I217_EEE_LP_ABILITY;
   15215 		pcs_status = I217_EEE_PCS_STATUS;
   15216 		adv_addr = I217_EEE_ADVERTISEMENT;
   15217 		break;
   15218 	default:
   15219 		return 0;
   15220 	}
   15221 
   15222 	if (sc->phy.acquire(sc)) {
   15223 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15224 		return 0;
   15225 	}
   15226 
   15227 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15228 	if (rv != 0)
   15229 		goto release;
   15230 
   15231 	/* Clear bits that enable EEE in various speeds */
   15232 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15233 
   15234 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15235 		/* Save off link partner's EEE ability */
   15236 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15237 		if (rv != 0)
   15238 			goto release;
   15239 
   15240 		/* Read EEE advertisement */
   15241 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15242 			goto release;
   15243 
   15244 		/*
   15245 		 * Enable EEE only for speeds in which the link partner is
   15246 		 * EEE capable and for which we advertise EEE.
   15247 		 */
   15248 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15249 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15250 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15251 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15252 			if ((data & ANLPAR_TX_FD) != 0)
   15253 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15254 			else {
   15255 				/*
   15256 				 * EEE is not supported in 100Half, so ignore
   15257 				 * partner's EEE in 100 ability if full-duplex
   15258 				 * is not advertised.
   15259 				 */
   15260 				sc->eee_lp_ability
   15261 				    &= ~AN_EEEADVERT_100_TX;
   15262 			}
   15263 		}
   15264 	}
   15265 
   15266 	if (sc->sc_phytype == WMPHY_82579) {
   15267 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15268 		if (rv != 0)
   15269 			goto release;
   15270 
   15271 		data &= ~I82579_LPI_PLL_SHUT_100;
   15272 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15273 	}
   15274 
   15275 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15276 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15277 		goto release;
   15278 
   15279 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15280 release:
   15281 	sc->phy.release(sc);
   15282 
   15283 	return rv;
   15284 }
   15285 
   15286 static int
   15287 wm_set_eee(struct wm_softc *sc)
   15288 {
   15289 	struct ethercom *ec = &sc->sc_ethercom;
   15290 
   15291 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15292 		return 0;
   15293 
   15294 	if (sc->sc_type == WM_T_I354) {
   15295 		/* I354 uses an external PHY */
   15296 		return 0; /* not yet */
   15297 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15298 		return wm_set_eee_i350(sc);
   15299 	else if (sc->sc_type >= WM_T_PCH2)
   15300 		return wm_set_eee_pchlan(sc);
   15301 
   15302 	return 0;
   15303 }
   15304 
   15305 /*
   15306  * Workarounds (mainly PHY related).
   15307  * Basically, PHY's workarounds are in the PHY drivers.
   15308  */
   15309 
   15310 /* Work-around for 82566 Kumeran PCS lock loss */
   15311 static int
   15312 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15313 {
   15314 	struct mii_data *mii = &sc->sc_mii;
   15315 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
    15316 	uint32_t reg;
          	int i, rv;
    15317 	uint16_t phyreg;
   15318 
   15319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15320 		device_xname(sc->sc_dev), __func__));
   15321 
   15322 	/* If the link is not up, do nothing */
   15323 	if ((status & STATUS_LU) == 0)
   15324 		return 0;
   15325 
   15326 	/* Nothing to do if the link is other than 1Gbps */
   15327 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15328 		return 0;
   15329 
   15330 	for (i = 0; i < 10; i++) {
   15331 		/* read twice */
   15332 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15333 		if (rv != 0)
   15334 			return rv;
   15335 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15336 		if (rv != 0)
   15337 			return rv;
   15338 
   15339 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15340 			goto out;	/* GOOD! */
   15341 
   15342 		/* Reset the PHY */
   15343 		wm_reset_phy(sc);
   15344 		delay(5*1000);
   15345 	}
   15346 
   15347 	/* Disable GigE link negotiation */
   15348 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15349 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15350 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15351 
   15352 	/*
   15353 	 * Call gig speed drop workaround on Gig disable before accessing
   15354 	 * any PHY registers.
   15355 	 */
   15356 	wm_gig_downshift_workaround_ich8lan(sc);
   15357 
   15358 out:
   15359 	return 0;
   15360 }
   15361 
   15362 /*
   15363  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15364  *  @sc: pointer to the HW structure
   15365  *
   15366  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15367  *  LPLU, Gig disable, MDIC PHY reset):
   15368  *    1) Set Kumeran Near-end loopback
   15369  *    2) Clear Kumeran Near-end loopback
   15370  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15371  */
   15372 static void
   15373 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15374 {
   15375 	uint16_t kmreg;
   15376 
   15377 	/* Only for igp3 */
   15378 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15379 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15380 			return;
   15381 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15382 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15383 			return;
   15384 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15385 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15386 	}
   15387 }
   15388 
   15389 /*
   15390  * Workaround for pch's PHYs
   15391  * XXX should be moved to new PHY driver?
   15392  */
   15393 static int
   15394 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15395 {
   15396 	device_t dev = sc->sc_dev;
   15397 	struct mii_data *mii = &sc->sc_mii;
   15398 	struct mii_softc *child;
   15399 	uint16_t phy_data, phyrev = 0;
   15400 	int phytype = sc->sc_phytype;
   15401 	int rv;
   15402 
   15403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15404 		device_xname(dev), __func__));
   15405 	KASSERT(sc->sc_type == WM_T_PCH);
   15406 
   15407 	/* Set MDIO slow mode before any other MDIO access */
   15408 	if (phytype == WMPHY_82577)
   15409 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15410 			return rv;
   15411 
   15412 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15413 	if (child != NULL)
   15414 		phyrev = child->mii_mpd_rev;
   15415 
    15416 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15417 	if ((child != NULL) &&
   15418 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15419 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15420 		/* Disable generation of early preamble (0x4431) */
   15421 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15422 		    &phy_data);
   15423 		if (rv != 0)
   15424 			return rv;
   15425 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15426 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15427 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15428 		    phy_data);
   15429 		if (rv != 0)
   15430 			return rv;
   15431 
   15432 		/* Preamble tuning for SSC */
   15433 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15434 		if (rv != 0)
   15435 			return rv;
   15436 	}
   15437 
   15438 	/* 82578 */
   15439 	if (phytype == WMPHY_82578) {
   15440 		/*
   15441 		 * Return registers to default by doing a soft reset then
   15442 		 * writing 0x3140 to the control register
   15443 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15444 		 */
   15445 		if ((child != NULL) && (phyrev < 2)) {
   15446 			PHY_RESET(child);
   15447 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15448 			    0x3140);
   15449 			if (rv != 0)
   15450 				return rv;
   15451 		}
   15452 	}
   15453 
   15454 	/* Select page 0 */
   15455 	if ((rv = sc->phy.acquire(sc)) != 0)
   15456 		return rv;
   15457 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15458 	sc->phy.release(sc);
   15459 	if (rv != 0)
   15460 		return rv;
   15461 
   15462 	/*
   15463 	 * Configure the K1 Si workaround during phy reset assuming there is
   15464 	 * link so that it disables K1 if link is in 1Gbps.
   15465 	 */
   15466 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15467 		return rv;
   15468 
   15469 	/* Workaround for link disconnects on a busy hub in half duplex */
   15470 	rv = sc->phy.acquire(sc);
   15471 	if (rv)
   15472 		return rv;
   15473 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15474 	if (rv)
   15475 		goto release;
   15476 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15477 	    phy_data & 0x00ff);
   15478 	if (rv)
   15479 		goto release;
   15480 
   15481 	/* Set MSE higher to enable link to stay up when noise is high */
   15482 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15483 release:
   15484 	sc->phy.release(sc);
   15485 
    15486 	return rv;
    15489 }
   15490 
   15491 /*
   15492  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15493  *  @sc:   pointer to the HW structure
   15494  */
   15495 static void
   15496 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15497 {
   15498 	device_t dev = sc->sc_dev;
   15499 	uint32_t mac_reg;
   15500 	uint16_t i, wuce;
   15501 	int count;
   15502 
   15503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15504 		device_xname(sc->sc_dev), __func__));
   15505 
   15506 	if (sc->phy.acquire(sc) != 0)
   15507 		return;
   15508 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15509 		goto release;
   15510 
   15511 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15512 	count = wm_rar_count(sc);
   15513 	for (i = 0; i < count; i++) {
   15514 		uint16_t lo, hi;
   15515 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15516 		lo = (uint16_t)(mac_reg & 0xffff);
   15517 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15518 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15519 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15520 
   15521 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15522 		lo = (uint16_t)(mac_reg & 0xffff);
   15523 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15524 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15525 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15526 	}
   15527 
   15528 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15529 
   15530 release:
   15531 	sc->phy.release(sc);
   15532 }
   15533 
   15534 /*
   15535  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15536  *  done after every PHY reset.
   15537  */
   15538 static int
   15539 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15540 {
   15541 	device_t dev = sc->sc_dev;
   15542 	int rv;
   15543 
   15544 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15545 		device_xname(dev), __func__));
   15546 	KASSERT(sc->sc_type == WM_T_PCH2);
   15547 
   15548 	/* Set MDIO slow mode before any other MDIO access */
   15549 	rv = wm_set_mdio_slow_mode_hv(sc);
   15550 	if (rv != 0)
   15551 		return rv;
   15552 
   15553 	rv = sc->phy.acquire(sc);
   15554 	if (rv != 0)
   15555 		return rv;
   15556 	/* Set MSE higher to enable link to stay up when noise is high */
   15557 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15558 	if (rv != 0)
   15559 		goto release;
   15560 	/* Drop link after 5 times MSE threshold was reached */
   15561 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15562 release:
   15563 	sc->phy.release(sc);
   15564 
   15565 	return rv;
   15566 }
   15567 
   15568 /**
   15569  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
    15570  *  @sc:   pointer to the HW structure
           *  @link: link up bool flag
   15571  *
   15572  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15573  *  preventing further DMA write requests.  Workaround the issue by disabling
    15574  *  the de-assertion of the clock request when in 1Gbps mode.
   15575  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15576  *  speeds in order to avoid Tx hangs.
   15577  **/
   15578 static int
   15579 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15580 {
   15581 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15582 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15583 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15584 	uint16_t phyreg;
   15585 
   15586 	if (link && (speed == STATUS_SPEED_1000)) {
    15587 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15588 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15589 		    &phyreg);
   15590 		if (rv != 0)
   15591 			goto release;
   15592 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15593 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15594 		if (rv != 0)
   15595 			goto release;
   15596 		delay(20);
   15597 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15598 
   15599 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15600 		    &phyreg);
   15601 release:
   15602 		sc->phy.release(sc);
   15603 		return rv;
   15604 	}
   15605 
   15606 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15607 
   15608 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15609 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15610 	    || !link
   15611 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15612 		goto update_fextnvm6;
   15613 
   15614 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15615 
   15616 	/* Clear link status transmit timeout */
   15617 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15618 	if (speed == STATUS_SPEED_100) {
   15619 		/* Set inband Tx timeout to 5x10us for 100Half */
   15620 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15621 
   15622 		/* Do not extend the K1 entry latency for 100Half */
   15623 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15624 	} else {
   15625 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15626 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15627 
   15628 		/* Extend the K1 entry latency for 10 Mbps */
   15629 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15630 	}
   15631 
   15632 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15633 
   15634 update_fextnvm6:
   15635 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15636 	return 0;
   15637 }
   15638 
   15639 /*
   15640  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15641  *  @sc:   pointer to the HW structure
   15642  *  @link: link up bool flag
   15643  *
   15644  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15645  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15646  *  If link is down, the function will restore the default K1 setting located
   15647  *  in the NVM.
   15648  */
   15649 static int
   15650 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15651 {
   15652 	int k1_enable = sc->sc_nvm_k1_enabled;
   15653 
   15654 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15655 		device_xname(sc->sc_dev), __func__));
   15656 
   15657 	if (sc->phy.acquire(sc) != 0)
   15658 		return -1;
   15659 
   15660 	if (link) {
   15661 		k1_enable = 0;
   15662 
   15663 		/* Link stall fix for link up */
   15664 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15665 		    0x0100);
   15666 	} else {
   15667 		/* Link stall fix for link down */
   15668 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15669 		    0x4100);
   15670 	}
   15671 
   15672 	wm_configure_k1_ich8lan(sc, k1_enable);
   15673 	sc->phy.release(sc);
   15674 
   15675 	return 0;
   15676 }
   15677 
   15678 /*
   15679  *  wm_k1_workaround_lv - K1 Si workaround
   15680  *  @sc:   pointer to the HW structure
   15681  *
   15682  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15683  *  Disable K1 for 1000 and 100 speeds
   15684  */
   15685 static int
   15686 wm_k1_workaround_lv(struct wm_softc *sc)
   15687 {
   15688 	uint32_t reg;
   15689 	uint16_t phyreg;
   15690 	int rv;
   15691 
   15692 	if (sc->sc_type != WM_T_PCH2)
   15693 		return 0;
   15694 
   15695 	/* Set K1 beacon duration based on 10Mbps speed */
   15696 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15697 	if (rv != 0)
   15698 		return rv;
   15699 
   15700 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15701 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15702 		if (phyreg &
   15703 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15704 			/* LV 1G/100 packet drop issue workaround */
   15705 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15706 			    &phyreg);
   15707 			if (rv != 0)
   15708 				return rv;
   15709 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15710 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15711 			    phyreg);
   15712 			if (rv != 0)
   15713 				return rv;
   15714 		} else {
   15715 			/* For 10Mbps */
   15716 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15717 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15718 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15719 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15720 		}
   15721 	}
   15722 
   15723 	return 0;
   15724 }
   15725 
   15726 /*
   15727  *  wm_link_stall_workaround_hv - Si workaround
   15728  *  @sc: pointer to the HW structure
   15729  *
   15730  *  This function works around a Si bug where the link partner can get
   15731  *  a link up indication before the PHY does. If small packets are sent
   15732  *  by the link partner they can be placed in the packet buffer without
   15733  *  being properly accounted for by the PHY and will stall preventing
   15734  *  further packets from being received.  The workaround is to clear the
   15735  *  packet buffer after the PHY detects link up.
   15736  */
   15737 static int
   15738 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15739 {
   15740 	uint16_t phyreg;
   15741 
   15742 	if (sc->sc_phytype != WMPHY_82578)
   15743 		return 0;
   15744 
    15745 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15746 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15747 	if ((phyreg & BMCR_LOOP) != 0)
   15748 		return 0;
   15749 
   15750 	/* Check if link is up and at 1Gbps */
   15751 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15752 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15753 	    | BM_CS_STATUS_SPEED_MASK;
   15754 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15755 		| BM_CS_STATUS_SPEED_1000))
   15756 		return 0;
   15757 
   15758 	delay(200 * 1000);	/* XXX too big */
   15759 
   15760 	/* Flush the packets in the fifo buffer */
   15761 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15762 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15763 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15764 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15765 
   15766 	return 0;
   15767 }
   15768 
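          /*
           * Put the Kumeran MDIO interface into slow mode.  The PCH-family PHY
           * workarounds above do this before any other MDIO access.
           */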
   15769 static int
   15770 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15771 {
   15772 	int rv;
   15773 	uint16_t reg;
   15774 
   15775 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15776 	if (rv != 0)
   15777 		return rv;
   15778 
    15779 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15780 	    reg | HV_KMRN_MDIO_SLOW);
   15781 }
   15782 
   15783 /*
   15784  *  wm_configure_k1_ich8lan - Configure K1 power state
   15785  *  @sc: pointer to the HW structure
   15786  *  @enable: K1 state to configure
   15787  *
   15788  *  Configure the K1 power state based on the provided parameter.
   15789  *  Assumes semaphore already acquired.
   15790  */
   15791 static void
   15792 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15793 {
   15794 	uint32_t ctrl, ctrl_ext, tmp;
   15795 	uint16_t kmreg;
   15796 	int rv;
   15797 
   15798 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15799 
   15800 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15801 	if (rv != 0)
   15802 		return;
   15803 
   15804 	if (k1_enable)
   15805 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15806 	else
   15807 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15808 
   15809 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15810 	if (rv != 0)
   15811 		return;
   15812 
   15813 	delay(20);
   15814 
   15815 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15816 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15817 
   15818 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15819 	tmp |= CTRL_FRCSPD;
   15820 
   15821 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15822 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15823 	CSR_WRITE_FLUSH(sc);
   15824 	delay(20);
   15825 
   15826 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15827 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15828 	CSR_WRITE_FLUSH(sc);
   15829 	delay(20);
   15832 }
   15833 
   15834 /* special case - for 82575 - need to do manual init ... */
   15835 static void
   15836 wm_reset_init_script_82575(struct wm_softc *sc)
   15837 {
   15838 	/*
    15839 	 * Remark: this is untested code - we have no board without EEPROM.
    15840 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15841 	 */
   15842 
   15843 	/* SerDes configuration via SERDESCTRL */
   15844 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15845 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15846 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15847 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15848 
   15849 	/* CCM configuration via CCMCTL register */
   15850 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15851 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15852 
   15853 	/* PCIe lanes configuration */
   15854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15858 
   15859 	/* PCIe PLL Configuration */
   15860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15862 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15863 }
   15864 
   15865 static void
   15866 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15867 {
   15868 	uint32_t reg;
   15869 	uint16_t nvmword;
   15870 	int rv;
   15871 
   15872 	if (sc->sc_type != WM_T_82580)
   15873 		return;
   15874 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15875 		return;
   15876 
   15877 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15878 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15879 	if (rv != 0) {
   15880 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15881 		    __func__);
   15882 		return;
   15883 	}
   15884 
   15885 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15886 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15887 		reg |= MDICNFG_DEST;
   15888 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15889 		reg |= MDICNFG_COM_MDIO;
   15890 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15891 }
   15892 
   15893 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
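          /* A PHY ID of all zeros or all ones means the PHY did not respond. */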
   15894 
   15895 static bool
   15896 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15897 {
   15898 	uint32_t reg;
   15899 	uint16_t id1, id2;
   15900 	int i, rv;
   15901 
   15902 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15903 		device_xname(sc->sc_dev), __func__));
   15904 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15905 
   15906 	id1 = id2 = 0xffff;
   15907 	for (i = 0; i < 2; i++) {
   15908 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15909 		    &id1);
   15910 		if ((rv != 0) || MII_INVALIDID(id1))
   15911 			continue;
   15912 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15913 		    &id2);
   15914 		if ((rv != 0) || MII_INVALIDID(id2))
   15915 			continue;
   15916 		break;
   15917 	}
   15918 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15919 		goto out;
   15920 
   15921 	/*
   15922 	 * In case the PHY needs to be in mdio slow mode,
   15923 	 * set slow mode and try to get the PHY id again.
   15924 	 */
   15925 	rv = 0;
   15926 	if (sc->sc_type < WM_T_PCH_LPT) {
   15927 		sc->phy.release(sc);
   15928 		wm_set_mdio_slow_mode_hv(sc);
   15929 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15930 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15931 		sc->phy.acquire(sc);
   15932 	}
   15933 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    15934 		printf("%s: PHY is not accessible\n", __func__);
   15935 		return false;
   15936 	}
   15937 out:
   15938 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15939 		/* Only unforce SMBus if ME is not active */
   15940 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15941 			uint16_t phyreg;
   15942 
   15943 			/* Unforce SMBus mode in PHY */
   15944 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15945 			    CV_SMB_CTRL, &phyreg);
   15946 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15947 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15948 			    CV_SMB_CTRL, phyreg);
   15949 
   15950 			/* Unforce SMBus mode in MAC */
   15951 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15952 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15953 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15954 		}
   15955 	}
   15956 	return true;
   15957 }
   15958 
   15959 static void
   15960 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15961 {
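          	/*
          	 * Asserting LANPHYPC_OVERRIDE with LANPHYPC_VALUE low
          	 * power-cycles the PHY.  The PHY configuration counter is
          	 * shortened to 50msec first so that reconfiguration after the
          	 * cycle completes promptly.
          	 */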
   15962 	uint32_t reg;
   15963 	int i;
   15964 
   15965 	/* Set PHY Config Counter to 50msec */
   15966 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15967 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15968 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15969 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15970 
   15971 	/* Toggle LANPHYPC */
   15972 	reg = CSR_READ(sc, WMREG_CTRL);
   15973 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15974 	reg &= ~CTRL_LANPHYPC_VALUE;
   15975 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15976 	CSR_WRITE_FLUSH(sc);
   15977 	delay(1000);
   15978 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15979 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15980 	CSR_WRITE_FLUSH(sc);
   15981 
   15982 	if (sc->sc_type < WM_T_PCH_LPT)
   15983 		delay(50 * 1000);
   15984 	else {
   15985 		i = 20;
   15986 
   15987 		do {
   15988 			delay(5 * 1000);
   15989 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15990 		    && i--);
   15991 
   15992 		delay(30 * 1000);
   15993 	}
   15994 }
   15995 
   15996 static int
   15997 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15998 {
   15999 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16000 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16001 	uint32_t rxa;
   16002 	uint16_t scale = 0, lat_enc = 0;
   16003 	int32_t obff_hwm = 0;
   16004 	int64_t lat_ns, value;
   16005 
   16006 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16007 		device_xname(sc->sc_dev), __func__));
   16008 
   16009 	if (link) {
   16010 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16011 		uint32_t status;
   16012 		uint16_t speed;
   16013 		pcireg_t preg;
   16014 
   16015 		status = CSR_READ(sc, WMREG_STATUS);
   16016 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16017 		case STATUS_SPEED_10:
   16018 			speed = 10;
   16019 			break;
   16020 		case STATUS_SPEED_100:
   16021 			speed = 100;
   16022 			break;
   16023 		case STATUS_SPEED_1000:
   16024 			speed = 1000;
   16025 			break;
   16026 		default:
   16027 			device_printf(sc->sc_dev, "Unknown speed "
   16028 			    "(status = %08x)\n", status);
   16029 			return -1;
   16030 		}
   16031 
   16032 		/* Rx Packet Buffer Allocation size (KB) */
   16033 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16034 
   16035 		/*
   16036 		 * Determine the maximum latency tolerated by the device.
   16037 		 *
   16038 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16039 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16040 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16041 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16042 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16043 		 */
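          		/*
          		 * Worked example with hypothetical numbers: rxa = 24KB
          		 * and a 1500 byte MTU at 1000Mb/s give
          		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
          		 *        = 172384.
          		 * 172384 exceeds LTRV_VALUE (1023), so the loop below
          		 * divides by 2^5 (rounding up) twice:
          		 * 172384 -> 5387 -> 169, yielding scale = 2 and
          		 * value = 169, i.e. 169 * 2^10 ns ~= 173us.
          		 */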
   16044 		lat_ns = ((int64_t)rxa * 1024 -
   16045 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16046 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16047 		if (lat_ns < 0)
   16048 			lat_ns = 0;
   16049 		else
   16050 			lat_ns /= speed;
   16051 		value = lat_ns;
   16052 
   16053 		while (value > LTRV_VALUE) {
    16054 			scale++;
   16055 			value = howmany(value, __BIT(5));
   16056 		}
   16057 		if (scale > LTRV_SCALE_MAX) {
   16058 			printf("%s: Invalid LTR latency scale %d\n",
   16059 			    device_xname(sc->sc_dev), scale);
   16060 			return -1;
   16061 		}
   16062 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16063 
   16064 		/* Determine the maximum latency tolerated by the platform */
   16065 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16066 		    WM_PCI_LTR_CAP_LPT);
   16067 		max_snoop = preg & 0xffff;
   16068 		max_nosnoop = preg >> 16;
   16069 
   16070 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16071 
   16072 		if (lat_enc > max_ltr_enc) {
   16073 			lat_enc = max_ltr_enc;
   16074 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16075 			    * PCI_LTR_SCALETONS(
   16076 				    __SHIFTOUT(lat_enc,
   16077 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16078 		}
   16079 
   16080 		if (lat_ns) {
   16081 			lat_ns *= speed * 1000;
   16082 			lat_ns /= 8;
   16083 			lat_ns /= 1000000000;
   16084 			obff_hwm = (int32_t)(rxa - lat_ns);
   16085 		}
   16086 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16087 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    16088 			    "(rxa = %d, lat_ns = %d)\n",
   16089 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16090 			return -1;
   16091 		}
   16092 	}
   16093 	/* Snoop and No-Snoop latencies the same */
   16094 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16095 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16096 
   16097 	/* Set OBFF high water mark */
   16098 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16099 	reg |= obff_hwm;
   16100 	CSR_WRITE(sc, WMREG_SVT, reg);
   16101 
   16102 	/* Enable OBFF */
   16103 	reg = CSR_READ(sc, WMREG_SVCR);
   16104 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16105 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16106 
   16107 	return 0;
   16108 }
   16109 
   16110 /*
   16111  * I210 Errata 25 and I211 Errata 10
   16112  * Slow System Clock.
   16113  */
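          /*
           * Sketch of the recovery sequence implemented below: if the internal
           * PHY reads back with its PLL still unconfigured, reset the PHY,
           * bounce the device through D3hot/D0 with a patched iNVM autoload
           * word, and retry up to WM_MAX_PLL_TRIES times.
           */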
   16114 static int
   16115 wm_pll_workaround_i210(struct wm_softc *sc)
   16116 {
   16117 	uint32_t mdicnfg, wuc;
   16118 	uint32_t reg;
   16119 	pcireg_t pcireg;
   16120 	uint32_t pmreg;
   16121 	uint16_t nvmword, tmp_nvmword;
   16122 	uint16_t phyval;
   16123 	bool wa_done = false;
   16124 	int i, rv = 0;
   16125 
   16126 	/* Get Power Management cap offset */
   16127 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16128 	    &pmreg, NULL) == 0)
   16129 		return -1;
   16130 
   16131 	/* Save WUC and MDICNFG registers */
   16132 	wuc = CSR_READ(sc, WMREG_WUC);
   16133 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16134 
   16135 	reg = mdicnfg & ~MDICNFG_DEST;
   16136 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16137 
   16138 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16139 		nvmword = INVM_DEFAULT_AL;
   16140 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16141 
   16142 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16143 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16144 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16145 
   16146 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16147 			rv = 0;
   16148 			break; /* OK */
   16149 		} else
   16150 			rv = -1;
   16151 
   16152 		wa_done = true;
   16153 		/* Directly reset the internal PHY */
   16154 		reg = CSR_READ(sc, WMREG_CTRL);
   16155 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16156 
   16157 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16158 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16159 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16160 
   16161 		CSR_WRITE(sc, WMREG_WUC, 0);
   16162 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16163 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16164 
   16165 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16166 		    pmreg + PCI_PMCSR);
   16167 		pcireg |= PCI_PMCSR_STATE_D3;
   16168 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16169 		    pmreg + PCI_PMCSR, pcireg);
   16170 		delay(1000);
   16171 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16172 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16173 		    pmreg + PCI_PMCSR, pcireg);
   16174 
   16175 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16176 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16177 
   16178 		/* Restore WUC register */
   16179 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16180 	}
   16181 
   16182 	/* Restore MDICNFG setting */
   16183 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16184 	if (wa_done)
   16185 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16186 	return rv;
   16187 }
   16188 
   16189 static void
   16190 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16191 {
   16192 	uint32_t reg;
   16193 
   16194 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16195 		device_xname(sc->sc_dev), __func__));
   16196 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16197 	    || (sc->sc_type == WM_T_PCH_CNP));
   16198 
   16199 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16200 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16201 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16202 
   16203 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16204 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16205 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16206 }
   16207