/*	$NetBSD: if_wm.c,v 1.626 2019/02/08 06:33:04 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.626 2019/02/08 06:33:04 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */

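/*
 * Usage sketch (illustrative, not part of the original file): with WM_DEBUG
 * defined, a call such as
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * prints only when the WM_DEBUG_LINK bit is set in wm_debug; without
 * WM_DEBUG the whole call compiles away to nothing.
 */
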
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

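/*
 * Worked example (illustrative): because the descriptor and job counts are
 * powers of two, advancing a ring index is a mask, not a modulo.  With
 * WM_NTXDESC == 4096, WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0, so the
 * index wraps back to descriptor 0.
 */
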
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

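/*
 * Worked example (illustrative): WM_CDTXOFF() converts a descriptor index
 * into a byte offset within the control-data area.  Assuming the 16-byte
 * legacy (wiseman) descriptor format, descriptor 10 starts at byte offset
 * 160 of the ring.
 */
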
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

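/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) declares
 * "char txq_txdw_evcnt_name[...]" and "struct evcnt txq_ev_txdw"; the
 * matching WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * formats the counter name as "txq00txdw" and attaches it dynamically.
 */
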
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* Tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* Rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

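/*
 * Note (illustrative): WM_RXCHAIN_LINK() appends in O(1) by keeping
 * rxq_tailp pointing at the final m_next pointer of the chain.  Linking
 * mbufs m1 then m2 yields rxq_head == m1, m1->m_next == m2 and
 * rxq_tailp == &m2->m_next, ready for the next append.
 */
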
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

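/*
 * Note (an assumption about intent, consistent with common PCI practice):
 * CSR_WRITE_FLUSH() reads the STATUS register so that any posted writes
 * are pushed out to the device before the driver proceeds.
 */
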
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

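/*
 * Example (illustrative): the descriptor rings take 64-bit DMA addresses
 * as two 32-bit halves.  For a descriptor at bus address 0x123456000,
 * WM_CDTXADDR_LO() yields 0x23456000 and WM_CDTXADDR_HI() yields 0x1 on
 * LP64; with a 32-bit bus_addr_t the high half is always 0.
 */
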
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions do not access MII registers; they access
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1321 	  "82567LM-3 LAN Controller",
   1322 	  WM_T_ICH10,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1324 	  "82567LF-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1327 	  "82567V-2 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1330 	  "82567V-3? LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1333 	  "HANKSVILLE LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1336 	  "PCH LAN (82577LM) Controller",
   1337 	  WM_T_PCH,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1339 	  "PCH LAN (82577LC) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1342 	  "PCH LAN (82578DM) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1345 	  "PCH LAN (82578DC) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1348 	  "PCH2 LAN (82579LM) Controller",
   1349 	  WM_T_PCH2,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1351 	  "PCH2 LAN (82579V) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1354 	  "82575EB dual-1000baseT Ethernet",
   1355 	  WM_T_82575,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1357 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1358 	  WM_T_82575,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1360 	  "82575GB quad-1000baseT Ethernet",
   1361 	  WM_T_82575,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1363 	  "82575GB quad-1000baseT Ethernet (PM)",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1366 	  "82576 1000BaseT Ethernet",
   1367 	  WM_T_82576,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1369 	  "82576 1000BaseX Ethernet",
   1370 	  WM_T_82576,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1373 	  "82576 gigabit Ethernet (SERDES)",
   1374 	  WM_T_82576,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1377 	  "82576 quad-1000BaseT Ethernet",
   1378 	  WM_T_82576,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1381 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1382 	  WM_T_82576,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1385 	  "82576 gigabit Ethernet",
   1386 	  WM_T_82576,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1389 	  "82576 gigabit Ethernet (SERDES)",
   1390 	  WM_T_82576,		WMP_F_SERDES },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1392 	  "82576 quad-gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1396 	  "82580 1000BaseT Ethernet",
   1397 	  WM_T_82580,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1399 	  "82580 1000BaseX Ethernet",
   1400 	  WM_T_82580,		WMP_F_FIBER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1403 	  "82580 1000BaseT Ethernet (SERDES)",
   1404 	  WM_T_82580,		WMP_F_SERDES },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1407 	  "82580 gigabit Ethernet (SGMII)",
   1408 	  WM_T_82580,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1410 	  "82580 dual-1000BaseT Ethernet",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1414 	  "82580 quad-1000BaseX Ethernet",
   1415 	  WM_T_82580,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1418 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1419 	  WM_T_82580,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1422 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1423 	  WM_T_82580,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1426 	  "DH89XXCC 1000BASE-KX Ethernet",
   1427 	  WM_T_82580,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1430 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1431 	  WM_T_82580,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1434 	  "I350 Gigabit Network Connection",
   1435 	  WM_T_I350,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1438 	  "I350 Gigabit Fiber Network Connection",
   1439 	  WM_T_I350,		WMP_F_FIBER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1442 	  "I350 Gigabit Backplane Connection",
   1443 	  WM_T_I350,		WMP_F_SERDES },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1446 	  "I350 Quad Port Gigabit Ethernet",
   1447 	  WM_T_I350,		WMP_F_SERDES },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1450 	  "I350 Gigabit Connection",
   1451 	  WM_T_I350,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1454 	  "I354 Gigabit Ethernet (KX)",
   1455 	  WM_T_I354,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1458 	  "I354 Gigabit Ethernet (SGMII)",
   1459 	  WM_T_I354,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1462 	  "I354 Gigabit Ethernet (2.5G)",
   1463 	  WM_T_I354,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1466 	  "I210-T1 Ethernet Server Adapter",
   1467 	  WM_T_I210,		WMP_F_COPPER },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1470 	  "I210 Ethernet (Copper OEM)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1474 	  "I210 Ethernet (Copper IT)",
   1475 	  WM_T_I210,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1478 	  "I210 Ethernet (Copper, FLASH less)",
   1479 	  WM_T_I210,		WMP_F_COPPER },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1482 	  "I210 Gigabit Ethernet (Fiber)",
   1483 	  WM_T_I210,		WMP_F_FIBER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1486 	  "I210 Gigabit Ethernet (SERDES)",
   1487 	  WM_T_I210,		WMP_F_SERDES },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1490 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1491 	  WM_T_I210,		WMP_F_SERDES },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1494 	  "I210 Gigabit Ethernet (SGMII)",
   1495 	  WM_T_I210,		WMP_F_COPPER },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1498 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1499 	  WM_T_I210,		WMP_F_COPPER },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1502 	  "I211 Ethernet (COPPER)",
   1503 	  WM_T_I211,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1505 	  "I217 V Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1508 	  "I217 LM Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1538 	  "I219 V Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_CNP,		WMP_F_COPPER },
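         	/* Sentinel entry; wm_lookup() stops at the NULL name. */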
   1567 	{ 0,			0,
   1568 	  NULL,
   1569 	  0,			0 },
   1570 };
   1571 
   1572 /*
   1573  * Register read/write functions.
    1574  * These are helpers other than the CSR_{READ|WRITE}() macros.
   1575  */
   1576 
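         /*
          * wm_io_read()/wm_io_write() access registers indirectly through
          * the I/O BAR (the IOADDR/IODATA pair in Intel's datasheets):
          * the target register's offset is written at I/O offset 0, then
          * the data is read or written at I/O offset 4.
          */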
   1577 #if 0 /* Not currently used */
   1578 static inline uint32_t
   1579 wm_io_read(struct wm_softc *sc, int reg)
   1580 {
   1581 
   1582 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1583 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1584 }
   1585 #endif
   1586 
   1587 static inline void
   1588 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1589 {
   1590 
   1591 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1592 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1593 }
   1594 
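         /*
          * Write to an 8-bit register behind an indirection control
          * register (such as SCTL on the 82575): the data and the target
          * sub-register offset are combined into a single write, and the
          * control register is then polled until the hardware sets the
          * READY bit or the poll times out.
          */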
   1595 static inline void
   1596 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1597     uint32_t data)
   1598 {
   1599 	uint32_t regval;
   1600 	int i;
   1601 
   1602 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1603 
   1604 	CSR_WRITE(sc, reg, regval);
   1605 
   1606 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1607 		delay(5);
   1608 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1609 			break;
   1610 	}
   1611 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1612 		aprint_error("%s: WARNING:"
   1613 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1614 		    device_xname(sc->sc_dev), reg);
   1615 	}
   1616 }
   1617 
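         /*
          * Store a DMA address into a descriptor as two little-endian
          * 32-bit halves; on platforms with 32-bit bus addresses the
          * high half is simply zero.
          */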
   1618 static inline void
   1619 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1620 {
   1621 	wa->wa_low = htole32(v & 0xffffffffU);
   1622 	if (sizeof(bus_addr_t) == 8)
   1623 		wa->wa_high = htole32((uint64_t) v >> 32);
   1624 	else
   1625 		wa->wa_high = 0;
   1626 }
   1627 
   1628 /*
   1629  * Descriptor sync/init functions.
   1630  */
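         /*
          * Sync [start, start + num) Tx descriptors. If the range wraps
          * past the end of the ring, the sync is split into two
          * bus_dmamap_sync() calls.
          */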
   1631 static inline void
   1632 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1633 {
   1634 	struct wm_softc *sc = txq->txq_sc;
   1635 
   1636 	/* If it will wrap around, sync to the end of the ring. */
   1637 	if ((start + num) > WM_NTXDESC(txq)) {
   1638 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1639 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1640 		    (WM_NTXDESC(txq) - start), ops);
   1641 		num -= (WM_NTXDESC(txq) - start);
   1642 		start = 0;
   1643 	}
   1644 
   1645 	/* Now sync whatever is left. */
   1646 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1647 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1648 }
   1649 
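         /* Sync a single Rx descriptor (the one at index "start"). */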
   1650 static inline void
   1651 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1652 {
   1653 	struct wm_softc *sc = rxq->rxq_sc;
   1654 
   1655 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1656 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1657 }
   1658 
   1659 static inline void
   1660 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1661 {
   1662 	struct wm_softc *sc = rxq->rxq_sc;
   1663 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1664 	struct mbuf *m = rxs->rxs_mbuf;
   1665 
   1666 	/*
   1667 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1668 	 * so that the payload after the Ethernet header is aligned
   1669 	 * to a 4-byte boundary.
    1670 	 *
   1671 	 * XXX BRAINDAMAGE ALERT!
   1672 	 * The stupid chip uses the same size for every buffer, which
   1673 	 * is set in the Receive Control register.  We are using the 2K
   1674 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1675 	 * reason, we can't "scoot" packets longer than the standard
   1676 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1677 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1678 	 * the upper layer copy the headers.
   1679 	 */
   1680 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1681 
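         	/*
         	 * Three Rx descriptor layouts are in use: the 82574 extended
         	 * format, the "new queue" format on 82575 and newer, and the
         	 * legacy wiseman format for everything else.
         	 */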
   1682 	if (sc->sc_type == WM_T_82574) {
   1683 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1684 		rxd->erx_data.erxd_addr =
   1685 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1686 		rxd->erx_data.erxd_dd = 0;
   1687 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1688 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1689 
   1690 		rxd->nqrx_data.nrxd_paddr =
   1691 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1692 		/* Currently, split header is not supported. */
   1693 		rxd->nqrx_data.nrxd_haddr = 0;
   1694 	} else {
   1695 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1696 
   1697 		wm_set_dma_addr(&rxd->wrx_addr,
   1698 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1699 		rxd->wrx_len = 0;
   1700 		rxd->wrx_cksum = 0;
   1701 		rxd->wrx_status = 0;
   1702 		rxd->wrx_errors = 0;
   1703 		rxd->wrx_special = 0;
   1704 	}
   1705 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1706 
   1707 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1708 }
   1709 
   1710 /*
   1711  * Device driver interface functions and commonly used functions.
   1712  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1713  */
   1714 
   1715 /* Lookup supported device table */
   1716 static const struct wm_product *
   1717 wm_lookup(const struct pci_attach_args *pa)
   1718 {
   1719 	const struct wm_product *wmp;
   1720 
   1721 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1722 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1723 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1724 			return wmp;
   1725 	}
   1726 	return NULL;
   1727 }
   1728 
   1729 /* The match function (ca_match) */
   1730 static int
   1731 wm_match(device_t parent, cfdata_t cf, void *aux)
   1732 {
   1733 	struct pci_attach_args *pa = aux;
   1734 
   1735 	if (wm_lookup(pa) != NULL)
   1736 		return 1;
   1737 
   1738 	return 0;
   1739 }
   1740 
   1741 /* The attach function (ca_attach) */
   1742 static void
   1743 wm_attach(device_t parent, device_t self, void *aux)
   1744 {
   1745 	struct wm_softc *sc = device_private(self);
   1746 	struct pci_attach_args *pa = aux;
   1747 	prop_dictionary_t dict;
   1748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1749 	pci_chipset_tag_t pc = pa->pa_pc;
   1750 	int counts[PCI_INTR_TYPE_SIZE];
   1751 	pci_intr_type_t max_type;
   1752 	const char *eetype, *xname;
   1753 	bus_space_tag_t memt;
   1754 	bus_space_handle_t memh;
   1755 	bus_size_t memsize;
   1756 	int memh_valid;
   1757 	int i, error;
   1758 	const struct wm_product *wmp;
   1759 	prop_data_t ea;
   1760 	prop_number_t pn;
   1761 	uint8_t enaddr[ETHER_ADDR_LEN];
   1762 	char buf[256];
   1763 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1764 	pcireg_t preg, memtype;
   1765 	uint16_t eeprom_data, apme_mask;
   1766 	bool force_clear_smbi;
   1767 	uint32_t link_mode;
   1768 	uint32_t reg;
   1769 
   1770 	sc->sc_dev = self;
   1771 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1772 	sc->sc_core_stopping = false;
   1773 
   1774 	wmp = wm_lookup(pa);
   1775 #ifdef DIAGNOSTIC
   1776 	if (wmp == NULL) {
   1777 		printf("\n");
   1778 		panic("wm_attach: impossible");
   1779 	}
   1780 #endif
   1781 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1782 
   1783 	sc->sc_pc = pa->pa_pc;
   1784 	sc->sc_pcitag = pa->pa_tag;
   1785 
   1786 	if (pci_dma64_available(pa))
   1787 		sc->sc_dmat = pa->pa_dmat64;
   1788 	else
   1789 		sc->sc_dmat = pa->pa_dmat;
   1790 
   1791 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1792 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1793 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1794 
   1795 	sc->sc_type = wmp->wmp_type;
   1796 
   1797 	/* Set default function pointers */
   1798 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1799 	sc->phy.release = sc->nvm.release = wm_put_null;
   1800 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1801 
   1802 	if (sc->sc_type < WM_T_82543) {
   1803 		if (sc->sc_rev < 2) {
   1804 			aprint_error_dev(sc->sc_dev,
   1805 			    "i82542 must be at least rev. 2\n");
   1806 			return;
   1807 		}
   1808 		if (sc->sc_rev < 3)
   1809 			sc->sc_type = WM_T_82542_2_0;
   1810 	}
   1811 
   1812 	/*
   1813 	 * Disable MSI for Errata:
   1814 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1815 	 *
   1816 	 *  82544: Errata 25
   1817 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1818 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1819 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1820 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1821 	 *
   1822 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1823 	 *
   1824 	 *  82571 & 82572: Errata 63
   1825 	 */
   1826 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1827 	    || (sc->sc_type == WM_T_82572))
   1828 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1829 
   1830 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1831 	    || (sc->sc_type == WM_T_82580)
   1832 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1833 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1834 		sc->sc_flags |= WM_F_NEWQUEUE;
   1835 
   1836 	/* Set device properties (mactype) */
   1837 	dict = device_properties(sc->sc_dev);
   1838 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1839 
   1840 	/*
    1841 	 * Map the device.  All devices support memory-mapped access,
   1842 	 * and it is really required for normal operation.
   1843 	 */
   1844 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1845 	switch (memtype) {
   1846 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1847 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1848 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1849 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1850 		break;
   1851 	default:
   1852 		memh_valid = 0;
   1853 		break;
   1854 	}
   1855 
   1856 	if (memh_valid) {
   1857 		sc->sc_st = memt;
   1858 		sc->sc_sh = memh;
   1859 		sc->sc_ss = memsize;
   1860 	} else {
   1861 		aprint_error_dev(sc->sc_dev,
   1862 		    "unable to map device registers\n");
   1863 		return;
   1864 	}
   1865 
   1866 	/*
   1867 	 * In addition, i82544 and later support I/O mapped indirect
   1868 	 * register access.  It is not desirable (nor supported in
   1869 	 * this driver) to use it for normal operation, though it is
   1870 	 * required to work around bugs in some chip versions.
   1871 	 */
   1872 	if (sc->sc_type >= WM_T_82544) {
   1873 		/* First we have to find the I/O BAR. */
   1874 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1875 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1876 			if (memtype == PCI_MAPREG_TYPE_IO)
   1877 				break;
   1878 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1879 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1880 				i += 4;	/* skip high bits, too */
   1881 		}
   1882 		if (i < PCI_MAPREG_END) {
   1883 			/*
    1884 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1885 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1886 			 * That's no problem, because those newer chips don't
    1887 			 * have this bug.
    1888 			 *
    1889 			 * The i8254x apparently doesn't respond when the
    1890 			 * I/O BAR is 0, which looks as if it hasn't
    1891 			 * been configured.
    1892 			 */
   1893 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1894 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1895 				aprint_error_dev(sc->sc_dev,
   1896 				    "WARNING: I/O BAR at zero.\n");
   1897 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1898 					0, &sc->sc_iot, &sc->sc_ioh,
   1899 					NULL, &sc->sc_ios) == 0) {
   1900 				sc->sc_flags |= WM_F_IOH_VALID;
   1901 			} else
   1902 				aprint_error_dev(sc->sc_dev,
   1903 				    "WARNING: unable to map I/O space\n");
   1904 		}
   1905 
   1906 	}
   1907 
   1908 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1909 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1910 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1911 	if (sc->sc_type < WM_T_82542_2_1)
   1912 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1913 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1914 
   1915 	/* power up chip */
   1916 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1917 	    && error != EOPNOTSUPP) {
   1918 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1919 		return;
   1920 	}
   1921 
   1922 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1923 	/*
    1924 	 * To save interrupt resources, don't use MSI-X if we can use
    1925 	 * only one queue.
   1926 	 */
   1927 	if (sc->sc_nqueues > 1) {
   1928 		max_type = PCI_INTR_TYPE_MSIX;
   1929 		/*
    1930 		 * The 82583 has an MSI-X capability in its PCI configuration
    1931 		 * space but doesn't actually support it; at least the
    1932 		 * documentation says nothing about MSI-X.
   1933 		 */
   1934 		counts[PCI_INTR_TYPE_MSIX]
   1935 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1936 	} else {
   1937 		max_type = PCI_INTR_TYPE_MSI;
   1938 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1939 	}
   1940 
   1941 	/* Allocation settings */
   1942 	counts[PCI_INTR_TYPE_MSI] = 1;
   1943 	counts[PCI_INTR_TYPE_INTX] = 1;
   1944 	/* overridden by disable flags */
   1945 	if (wm_disable_msi != 0) {
   1946 		counts[PCI_INTR_TYPE_MSI] = 0;
   1947 		if (wm_disable_msix != 0) {
   1948 			max_type = PCI_INTR_TYPE_INTX;
   1949 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1950 		}
   1951 	} else if (wm_disable_msix != 0) {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
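         	/*
         	 * Try interrupt types from max_type on down (MSI-X -> MSI ->
         	 * INTx), falling back to the next type whenever setup of the
         	 * current one fails.
         	 */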
   1956 alloc_retry:
   1957 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1958 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1959 		return;
   1960 	}
   1961 
   1962 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1963 		error = wm_setup_msix(sc);
   1964 		if (error) {
   1965 			pci_intr_release(pc, sc->sc_intrs,
   1966 			    counts[PCI_INTR_TYPE_MSIX]);
   1967 
   1968 			/* Setup for MSI: Disable MSI-X */
   1969 			max_type = PCI_INTR_TYPE_MSI;
   1970 			counts[PCI_INTR_TYPE_MSI] = 1;
   1971 			counts[PCI_INTR_TYPE_INTX] = 1;
   1972 			goto alloc_retry;
   1973 		}
   1974 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1975 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1976 		error = wm_setup_legacy(sc);
   1977 		if (error) {
   1978 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1979 			    counts[PCI_INTR_TYPE_MSI]);
   1980 
   1981 			/* The next try is for INTx: Disable MSI */
   1982 			max_type = PCI_INTR_TYPE_INTX;
   1983 			counts[PCI_INTR_TYPE_INTX] = 1;
   1984 			goto alloc_retry;
   1985 		}
   1986 	} else {
   1987 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1988 		error = wm_setup_legacy(sc);
   1989 		if (error) {
   1990 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1991 			    counts[PCI_INTR_TYPE_INTX]);
   1992 			return;
   1993 		}
   1994 	}
   1995 
   1996 	/*
   1997 	 * Check the function ID (unit number of the chip).
   1998 	 */
   1999 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2000 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2001 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2002 	    || (sc->sc_type == WM_T_82580)
   2003 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2004 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2005 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2006 	else
   2007 		sc->sc_funcid = 0;
   2008 
   2009 	/*
   2010 	 * Determine a few things about the bus we're connected to.
   2011 	 */
   2012 	if (sc->sc_type < WM_T_82543) {
   2013 		/* We don't really know the bus characteristics here. */
   2014 		sc->sc_bus_speed = 33;
   2015 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2016 		/*
    2017 		 * CSA (Communication Streaming Architecture) is about as fast
    2018 		 * as a 32-bit 66MHz PCI bus.
   2019 		 */
   2020 		sc->sc_flags |= WM_F_CSA;
   2021 		sc->sc_bus_speed = 66;
   2022 		aprint_verbose_dev(sc->sc_dev,
   2023 		    "Communication Streaming Architecture\n");
   2024 		if (sc->sc_type == WM_T_82547) {
   2025 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2026 			callout_setfunc(&sc->sc_txfifo_ch,
   2027 			    wm_82547_txfifo_stall, sc);
   2028 			aprint_verbose_dev(sc->sc_dev,
   2029 			    "using 82547 Tx FIFO stall work-around\n");
   2030 		}
   2031 	} else if (sc->sc_type >= WM_T_82571) {
   2032 		sc->sc_flags |= WM_F_PCIE;
   2033 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2034 		    && (sc->sc_type != WM_T_ICH10)
   2035 		    && (sc->sc_type != WM_T_PCH)
   2036 		    && (sc->sc_type != WM_T_PCH2)
   2037 		    && (sc->sc_type != WM_T_PCH_LPT)
   2038 		    && (sc->sc_type != WM_T_PCH_SPT)
   2039 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2040 			/* ICH* and PCH* have no PCIe capability registers */
   2041 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2042 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2043 				NULL) == 0)
   2044 				aprint_error_dev(sc->sc_dev,
   2045 				    "unable to find PCIe capability\n");
   2046 		}
   2047 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2048 	} else {
   2049 		reg = CSR_READ(sc, WMREG_STATUS);
   2050 		if (reg & STATUS_BUS64)
   2051 			sc->sc_flags |= WM_F_BUS64;
   2052 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2053 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2054 
   2055 			sc->sc_flags |= WM_F_PCIX;
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIX capability\n");
   2060 			else if (sc->sc_type != WM_T_82545_3 &&
   2061 				 sc->sc_type != WM_T_82546_3) {
   2062 				/*
   2063 				 * Work around a problem caused by the BIOS
   2064 				 * setting the max memory read byte count
   2065 				 * incorrectly.
   2066 				 */
   2067 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2068 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2069 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2070 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2071 
   2072 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2073 				    PCIX_CMD_BYTECNT_SHIFT;
   2074 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2075 				    PCIX_STATUS_MAXB_SHIFT;
   2076 				if (bytecnt > maxb) {
   2077 					aprint_verbose_dev(sc->sc_dev,
   2078 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2079 					    512 << bytecnt, 512 << maxb);
   2080 					pcix_cmd = (pcix_cmd &
   2081 					    ~PCIX_CMD_BYTECNT_MASK) |
   2082 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2083 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2084 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2085 					    pcix_cmd);
   2086 				}
   2087 			}
   2088 		}
   2089 		/*
   2090 		 * The quad port adapter is special; it has a PCIX-PCIX
   2091 		 * bridge on the board, and can run the secondary bus at
   2092 		 * a higher speed.
   2093 		 */
   2094 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2095 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2096 								      : 66;
   2097 		} else if (sc->sc_flags & WM_F_PCIX) {
   2098 			switch (reg & STATUS_PCIXSPD_MASK) {
   2099 			case STATUS_PCIXSPD_50_66:
   2100 				sc->sc_bus_speed = 66;
   2101 				break;
   2102 			case STATUS_PCIXSPD_66_100:
   2103 				sc->sc_bus_speed = 100;
   2104 				break;
   2105 			case STATUS_PCIXSPD_100_133:
   2106 				sc->sc_bus_speed = 133;
   2107 				break;
   2108 			default:
   2109 				aprint_error_dev(sc->sc_dev,
   2110 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2111 				    reg & STATUS_PCIXSPD_MASK);
   2112 				sc->sc_bus_speed = 66;
   2113 				break;
   2114 			}
   2115 		} else
   2116 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2117 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2118 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2119 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2120 	}
   2121 
   2122 	/* clear interesting stat counters */
   2123 	CSR_READ(sc, WMREG_COLC);
   2124 	CSR_READ(sc, WMREG_RXERRC);
   2125 
   2126 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2127 	    || (sc->sc_type >= WM_T_ICH8))
   2128 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2129 	if (sc->sc_type >= WM_T_ICH8)
   2130 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2131 
   2132 	/* Set PHY, NVM mutex related stuff */
   2133 	switch (sc->sc_type) {
   2134 	case WM_T_82542_2_0:
   2135 	case WM_T_82542_2_1:
   2136 	case WM_T_82543:
   2137 	case WM_T_82544:
   2138 		/* Microwire */
   2139 		sc->nvm.read = wm_nvm_read_uwire;
   2140 		sc->sc_nvm_wordsize = 64;
   2141 		sc->sc_nvm_addrbits = 6;
   2142 		break;
   2143 	case WM_T_82540:
   2144 	case WM_T_82545:
   2145 	case WM_T_82545_3:
   2146 	case WM_T_82546:
   2147 	case WM_T_82546_3:
   2148 		/* Microwire */
   2149 		sc->nvm.read = wm_nvm_read_uwire;
   2150 		reg = CSR_READ(sc, WMREG_EECD);
   2151 		if (reg & EECD_EE_SIZE) {
   2152 			sc->sc_nvm_wordsize = 256;
   2153 			sc->sc_nvm_addrbits = 8;
   2154 		} else {
   2155 			sc->sc_nvm_wordsize = 64;
   2156 			sc->sc_nvm_addrbits = 6;
   2157 		}
   2158 		sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		sc->nvm.acquire = wm_get_eecd;
   2160 		sc->nvm.release = wm_put_eecd;
   2161 		break;
   2162 	case WM_T_82541:
   2163 	case WM_T_82541_2:
   2164 	case WM_T_82547:
   2165 	case WM_T_82547_2:
   2166 		reg = CSR_READ(sc, WMREG_EECD);
   2167 		/*
    2168 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2169 		 * 8254[17], so set the flags and functions before calling it.
   2170 		 */
   2171 		sc->sc_flags |= WM_F_LOCK_EECD;
   2172 		sc->nvm.acquire = wm_get_eecd;
   2173 		sc->nvm.release = wm_put_eecd;
   2174 		if (reg & EECD_EE_TYPE) {
   2175 			/* SPI */
   2176 			sc->nvm.read = wm_nvm_read_spi;
   2177 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 		} else {
   2180 			/* Microwire */
   2181 			sc->nvm.read = wm_nvm_read_uwire;
   2182 			if ((reg & EECD_EE_ABITS) != 0) {
   2183 				sc->sc_nvm_wordsize = 256;
   2184 				sc->sc_nvm_addrbits = 8;
   2185 			} else {
   2186 				sc->sc_nvm_wordsize = 64;
   2187 				sc->sc_nvm_addrbits = 6;
   2188 			}
   2189 		}
   2190 		break;
   2191 	case WM_T_82571:
   2192 	case WM_T_82572:
   2193 		/* SPI */
   2194 		sc->nvm.read = wm_nvm_read_eerd;
    2195 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2196 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2197 		wm_nvm_set_addrbits_size_eecd(sc);
   2198 		sc->phy.acquire = wm_get_swsm_semaphore;
   2199 		sc->phy.release = wm_put_swsm_semaphore;
   2200 		sc->nvm.acquire = wm_get_nvm_82571;
   2201 		sc->nvm.release = wm_put_nvm_82571;
   2202 		break;
   2203 	case WM_T_82573:
   2204 	case WM_T_82574:
   2205 	case WM_T_82583:
   2206 		sc->nvm.read = wm_nvm_read_eerd;
    2207 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2208 		if (sc->sc_type == WM_T_82573) {
   2209 			sc->phy.acquire = wm_get_swsm_semaphore;
   2210 			sc->phy.release = wm_put_swsm_semaphore;
   2211 			sc->nvm.acquire = wm_get_nvm_82571;
   2212 			sc->nvm.release = wm_put_nvm_82571;
   2213 		} else {
   2214 			/* Both PHY and NVM use the same semaphore. */
   2215 			sc->phy.acquire = sc->nvm.acquire
   2216 			    = wm_get_swfwhw_semaphore;
   2217 			sc->phy.release = sc->nvm.release
   2218 			    = wm_put_swfwhw_semaphore;
   2219 		}
   2220 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2221 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2222 			sc->sc_nvm_wordsize = 2048;
   2223 		} else {
   2224 			/* SPI */
   2225 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2226 			wm_nvm_set_addrbits_size_eecd(sc);
   2227 		}
   2228 		break;
   2229 	case WM_T_82575:
   2230 	case WM_T_82576:
   2231 	case WM_T_82580:
   2232 	case WM_T_I350:
   2233 	case WM_T_I354:
   2234 	case WM_T_80003:
   2235 		/* SPI */
   2236 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2237 		wm_nvm_set_addrbits_size_eecd(sc);
   2238 		if ((sc->sc_type == WM_T_80003)
   2239 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2240 			sc->nvm.read = wm_nvm_read_eerd;
   2241 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2242 		} else {
   2243 			sc->nvm.read = wm_nvm_read_spi;
   2244 			sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		}
   2246 		sc->phy.acquire = wm_get_phy_82575;
   2247 		sc->phy.release = wm_put_phy_82575;
   2248 		sc->nvm.acquire = wm_get_nvm_80003;
   2249 		sc->nvm.release = wm_put_nvm_80003;
   2250 		break;
   2251 	case WM_T_ICH8:
   2252 	case WM_T_ICH9:
   2253 	case WM_T_ICH10:
   2254 	case WM_T_PCH:
   2255 	case WM_T_PCH2:
   2256 	case WM_T_PCH_LPT:
   2257 		sc->nvm.read = wm_nvm_read_ich8;
   2258 		/* FLASH */
   2259 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2260 		sc->sc_nvm_wordsize = 2048;
   2261 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2262 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2263 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2264 			aprint_error_dev(sc->sc_dev,
   2265 			    "can't map FLASH registers\n");
   2266 			goto out;
   2267 		}
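         		/*
         		 * GFPREG describes the NVM region of the flash in units
         		 * of ICH_FLASH_SECTOR_SIZE sectors: the low field holds
         		 * the first sector, the field at bit 16 the last one.
         		 * The region holds two NVM banks, so the per-bank size
         		 * in 16-bit words is bytes / 2 banks / 2 bytes per word.
         		 */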
   2268 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2269 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2270 		    ICH_FLASH_SECTOR_SIZE;
   2271 		sc->sc_ich8_flash_bank_size =
   2272 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2273 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2274 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2275 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2276 		sc->sc_flashreg_offset = 0;
   2277 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2278 		sc->phy.release = wm_put_swflag_ich8lan;
   2279 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2280 		sc->nvm.release = wm_put_nvm_ich8lan;
   2281 		break;
   2282 	case WM_T_PCH_SPT:
   2283 	case WM_T_PCH_CNP:
   2284 		sc->nvm.read = wm_nvm_read_spt;
   2285 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2286 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2287 		sc->sc_flasht = sc->sc_st;
   2288 		sc->sc_flashh = sc->sc_sh;
   2289 		sc->sc_ich8_flash_base = 0;
   2290 		sc->sc_nvm_wordsize =
   2291 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2292 		    * NVM_SIZE_MULTIPLIER;
    2293 		/* This is the size in bytes; we want words */
   2294 		sc->sc_nvm_wordsize /= 2;
   2295 		/* assume 2 banks */
   2296 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2297 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2298 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2299 		sc->phy.release = wm_put_swflag_ich8lan;
   2300 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2301 		sc->nvm.release = wm_put_nvm_ich8lan;
   2302 		break;
   2303 	case WM_T_I210:
   2304 	case WM_T_I211:
    2305 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2306 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2307 		if (wm_nvm_flash_presence_i210(sc)) {
   2308 			sc->nvm.read = wm_nvm_read_eerd;
   2309 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2310 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2311 			wm_nvm_set_addrbits_size_eecd(sc);
   2312 		} else {
   2313 			sc->nvm.read = wm_nvm_read_invm;
   2314 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2315 			sc->sc_nvm_wordsize = INVM_SIZE;
   2316 		}
   2317 		sc->phy.acquire = wm_get_phy_82575;
   2318 		sc->phy.release = wm_put_phy_82575;
   2319 		sc->nvm.acquire = wm_get_nvm_80003;
   2320 		sc->nvm.release = wm_put_nvm_80003;
   2321 		break;
   2322 	default:
   2323 		break;
   2324 	}
   2325 
   2326 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2327 	switch (sc->sc_type) {
   2328 	case WM_T_82571:
   2329 	case WM_T_82572:
   2330 		reg = CSR_READ(sc, WMREG_SWSM2);
   2331 		if ((reg & SWSM2_LOCK) == 0) {
   2332 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2333 			force_clear_smbi = true;
   2334 		} else
   2335 			force_clear_smbi = false;
   2336 		break;
   2337 	case WM_T_82573:
   2338 	case WM_T_82574:
   2339 	case WM_T_82583:
   2340 		force_clear_smbi = true;
   2341 		break;
   2342 	default:
   2343 		force_clear_smbi = false;
   2344 		break;
   2345 	}
   2346 	if (force_clear_smbi) {
   2347 		reg = CSR_READ(sc, WMREG_SWSM);
   2348 		if ((reg & SWSM_SMBI) != 0)
   2349 			aprint_error_dev(sc->sc_dev,
   2350 			    "Please update the Bootagent\n");
   2351 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2352 	}
   2353 
   2354 	/*
    2355 	 * Defer printing the EEPROM type until after verifying the checksum.
   2356 	 * This allows the EEPROM type to be printed correctly in the case
   2357 	 * that no EEPROM is attached.
   2358 	 */
   2359 	/*
   2360 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2361 	 * this for later, so we can fail future reads from the EEPROM.
   2362 	 */
   2363 	if (wm_nvm_validate_checksum(sc)) {
   2364 		/*
   2365 		 * Read twice again because some PCI-e parts fail the
   2366 		 * first check due to the link being in sleep state.
   2367 		 */
   2368 		if (wm_nvm_validate_checksum(sc))
   2369 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2370 	}
   2371 
   2372 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2373 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2374 	else {
   2375 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2376 		    sc->sc_nvm_wordsize);
   2377 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2378 			aprint_verbose("iNVM");
   2379 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2380 			aprint_verbose("FLASH(HW)");
   2381 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2382 			aprint_verbose("FLASH");
   2383 		else {
   2384 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2385 				eetype = "SPI";
   2386 			else
   2387 				eetype = "MicroWire";
   2388 			aprint_verbose("(%d address bits) %s EEPROM",
   2389 			    sc->sc_nvm_addrbits, eetype);
   2390 		}
   2391 	}
   2392 	wm_nvm_version(sc);
   2393 	aprint_verbose("\n");
   2394 
   2395 	/*
    2396 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
   2397 	 * incorrect.
   2398 	 */
   2399 	wm_gmii_setup_phytype(sc, 0, 0);
   2400 
   2401 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2402 	switch (sc->sc_type) {
   2403 	case WM_T_ICH8:
   2404 	case WM_T_ICH9:
   2405 	case WM_T_ICH10:
   2406 	case WM_T_PCH:
   2407 	case WM_T_PCH2:
   2408 	case WM_T_PCH_LPT:
   2409 	case WM_T_PCH_SPT:
   2410 	case WM_T_PCH_CNP:
   2411 		apme_mask = WUC_APME;
   2412 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2413 		if ((eeprom_data & apme_mask) != 0)
   2414 			sc->sc_flags |= WM_F_WOL;
   2415 		break;
   2416 	default:
   2417 		break;
   2418 	}
   2419 
   2420 	/* Reset the chip to a known state. */
   2421 	wm_reset(sc);
   2422 
   2423 	/*
   2424 	 * Check for I21[01] PLL workaround.
   2425 	 *
   2426 	 * Three cases:
   2427 	 * a) Chip is I211.
   2428 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2429 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2430 	 */
   2431 	if (sc->sc_type == WM_T_I211)
   2432 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 	if (sc->sc_type == WM_T_I210) {
   2434 		if (!wm_nvm_flash_presence_i210(sc))
   2435 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2436 		else if ((sc->sc_nvm_ver_major < 3)
   2437 		    || ((sc->sc_nvm_ver_major == 3)
   2438 			&& (sc->sc_nvm_ver_minor < 25))) {
   2439 			aprint_verbose_dev(sc->sc_dev,
   2440 			    "ROM image version %d.%d is older than 3.25\n",
   2441 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2442 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2443 		}
   2444 	}
   2445 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2446 		wm_pll_workaround_i210(sc);
   2447 
   2448 	wm_get_wakeup(sc);
   2449 
   2450 	/* Non-AMT based hardware can now take control from firmware */
   2451 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2452 		wm_get_hw_control(sc);
   2453 
   2454 	/*
   2455 	 * Read the Ethernet address from the EEPROM, if not first found
   2456 	 * in device properties.
   2457 	 */
   2458 	ea = prop_dictionary_get(dict, "mac-address");
   2459 	if (ea != NULL) {
   2460 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2461 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2462 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2463 	} else {
   2464 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2465 			aprint_error_dev(sc->sc_dev,
   2466 			    "unable to read Ethernet address\n");
   2467 			goto out;
   2468 		}
   2469 	}
   2470 
   2471 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2472 	    ether_sprintf(enaddr));
   2473 
   2474 	/*
   2475 	 * Read the config info from the EEPROM, and set up various
   2476 	 * bits in the control registers based on their contents.
   2477 	 */
   2478 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2479 	if (pn != NULL) {
   2480 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2481 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2482 	} else {
   2483 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2484 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2485 			goto out;
   2486 		}
   2487 	}
   2488 
   2489 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2490 	if (pn != NULL) {
   2491 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2492 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2493 	} else {
   2494 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2495 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2496 			goto out;
   2497 		}
   2498 	}
   2499 
   2500 	/* check for WM_F_WOL */
   2501 	switch (sc->sc_type) {
   2502 	case WM_T_82542_2_0:
   2503 	case WM_T_82542_2_1:
   2504 	case WM_T_82543:
   2505 		/* dummy? */
   2506 		eeprom_data = 0;
   2507 		apme_mask = NVM_CFG3_APME;
   2508 		break;
   2509 	case WM_T_82544:
   2510 		apme_mask = NVM_CFG2_82544_APM_EN;
   2511 		eeprom_data = cfg2;
   2512 		break;
   2513 	case WM_T_82546:
   2514 	case WM_T_82546_3:
   2515 	case WM_T_82571:
   2516 	case WM_T_82572:
   2517 	case WM_T_82573:
   2518 	case WM_T_82574:
   2519 	case WM_T_82583:
   2520 	case WM_T_80003:
   2521 	case WM_T_82575:
   2522 	case WM_T_82576:
   2523 		apme_mask = NVM_CFG3_APME;
   2524 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2525 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2526 		break;
   2527 	case WM_T_82580:
   2528 	case WM_T_I350:
   2529 	case WM_T_I354:
   2530 	case WM_T_I210:
   2531 	case WM_T_I211:
   2532 		apme_mask = NVM_CFG3_APME;
   2533 		wm_nvm_read(sc,
   2534 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2535 		    1, &eeprom_data);
   2536 		break;
   2537 	case WM_T_ICH8:
   2538 	case WM_T_ICH9:
   2539 	case WM_T_ICH10:
   2540 	case WM_T_PCH:
   2541 	case WM_T_PCH2:
   2542 	case WM_T_PCH_LPT:
   2543 	case WM_T_PCH_SPT:
   2544 	case WM_T_PCH_CNP:
    2545 		/* Already checked before wm_reset() */
   2546 		apme_mask = eeprom_data = 0;
   2547 		break;
   2548 	default: /* XXX 82540 */
   2549 		apme_mask = NVM_CFG3_APME;
   2550 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2551 		break;
   2552 	}
   2553 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2554 	if ((eeprom_data & apme_mask) != 0)
   2555 		sc->sc_flags |= WM_F_WOL;
   2556 
   2557 	/*
    2558 	 * We have the EEPROM settings; now apply the special cases
    2559 	 * where the EEPROM may be wrong or the board won't support
    2560 	 * wake-on-LAN on a particular port.
   2561 	 */
   2562 	switch (sc->sc_pcidevid) {
   2563 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2564 		sc->sc_flags &= ~WM_F_WOL;
   2565 		break;
   2566 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2567 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2568 		/* Wake events only supported on port A for dual fiber
   2569 		 * regardless of eeprom setting */
   2570 		if (sc->sc_funcid == 1)
   2571 			sc->sc_flags &= ~WM_F_WOL;
   2572 		break;
   2573 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2574 		/* if quad port adapter, disable WoL on all but port A */
   2575 		if (sc->sc_funcid != 0)
   2576 			sc->sc_flags &= ~WM_F_WOL;
   2577 		break;
   2578 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2579 		/* Wake events only supported on port A for dual fiber
   2580 		 * regardless of eeprom setting */
   2581 		if (sc->sc_funcid == 1)
   2582 			sc->sc_flags &= ~WM_F_WOL;
   2583 		break;
   2584 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2585 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2586 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2587 		/* if quad port adapter, disable WoL on all but port A */
   2588 		if (sc->sc_funcid != 0)
   2589 			sc->sc_flags &= ~WM_F_WOL;
   2590 		break;
   2591 	}
   2592 
   2593 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2594 		/* Check NVM for autonegotiation */
   2595 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2596 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2597 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2598 		}
   2599 	}
   2600 
   2601 	/*
   2602 	 * XXX need special handling for some multiple port cards
    2603 	 * to disable a particular port.
   2604 	 */
   2605 
   2606 	if (sc->sc_type >= WM_T_82544) {
   2607 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2608 		if (pn != NULL) {
   2609 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2610 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2611 		} else {
   2612 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2613 				aprint_error_dev(sc->sc_dev,
   2614 				    "unable to read SWDPIN\n");
   2615 				goto out;
   2616 			}
   2617 		}
   2618 	}
   2619 
   2620 	if (cfg1 & NVM_CFG1_ILOS)
   2621 		sc->sc_ctrl |= CTRL_ILOS;
   2622 
   2623 	/*
   2624 	 * XXX
    2625 	 * This code isn't correct because pins 2 and 3 are located at
    2626 	 * different positions on newer chips. Check all datasheets.
    2627 	 *
    2628 	 * Until this problem is resolved, only handle chips up to the 82580.
   2629 	 */
   2630 	if (sc->sc_type <= WM_T_82580) {
   2631 		if (sc->sc_type >= WM_T_82544) {
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPIO_SHIFT;
   2635 			sc->sc_ctrl |=
   2636 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2637 			    CTRL_SWDPINS_SHIFT;
   2638 		} else {
   2639 			sc->sc_ctrl |=
   2640 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2641 			    CTRL_SWDPIO_SHIFT;
   2642 		}
   2643 	}
   2644 
   2645 	/* XXX For other than 82580? */
   2646 	if (sc->sc_type == WM_T_82580) {
   2647 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2648 		if (nvmword & __BIT(13))
   2649 			sc->sc_ctrl |= CTRL_ILOS;
   2650 	}
   2651 
   2652 #if 0
   2653 	if (sc->sc_type >= WM_T_82544) {
   2654 		if (cfg1 & NVM_CFG1_IPS0)
   2655 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2656 		if (cfg1 & NVM_CFG1_IPS1)
   2657 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPIO_SHIFT;
   2661 		sc->sc_ctrl_ext |=
   2662 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2663 		    CTRL_EXT_SWDPINS_SHIFT;
   2664 	} else {
   2665 		sc->sc_ctrl_ext |=
   2666 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2667 		    CTRL_EXT_SWDPIO_SHIFT;
   2668 	}
   2669 #endif
   2670 
   2671 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2672 #if 0
   2673 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2674 #endif
   2675 
   2676 	if (sc->sc_type == WM_T_PCH) {
   2677 		uint16_t val;
   2678 
   2679 		/* Save the NVM K1 bit setting */
   2680 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2681 
   2682 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2683 			sc->sc_nvm_k1_enabled = 1;
   2684 		else
   2685 			sc->sc_nvm_k1_enabled = 0;
   2686 	}
   2687 
   2688 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2689 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2690 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2691 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2692 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2693 	    || sc->sc_type == WM_T_82573
   2694 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2695 		/* Copper only */
   2696 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2697 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2698 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2699 	    || (sc->sc_type == WM_T_I211)) {
   2700 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2701 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2702 		switch (link_mode) {
   2703 		case CTRL_EXT_LINK_MODE_1000KX:
   2704 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2705 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2706 			break;
   2707 		case CTRL_EXT_LINK_MODE_SGMII:
   2708 			if (wm_sgmii_uses_mdio(sc)) {
   2709 				aprint_verbose_dev(sc->sc_dev,
   2710 				    "SGMII(MDIO)\n");
   2711 				sc->sc_flags |= WM_F_SGMII;
   2712 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2713 				break;
   2714 			}
   2715 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2716 			/*FALLTHROUGH*/
   2717 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2718 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2719 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2720 				if (link_mode
   2721 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2723 					sc->sc_flags |= WM_F_SGMII;
   2724 				} else {
   2725 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2726 					aprint_verbose_dev(sc->sc_dev,
   2727 					    "SERDES\n");
   2728 				}
   2729 				break;
   2730 			}
   2731 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2732 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2733 
   2734 			/* Change current link mode setting */
   2735 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2736 			switch (sc->sc_mediatype) {
   2737 			case WM_MEDIATYPE_COPPER:
   2738 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2739 				break;
   2740 			case WM_MEDIATYPE_SERDES:
   2741 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2742 				break;
   2743 			default:
   2744 				break;
   2745 			}
   2746 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2747 			break;
   2748 		case CTRL_EXT_LINK_MODE_GMII:
   2749 		default:
   2750 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2751 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2752 			break;
   2753 		}
   2754 
   2756 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2757 			reg |= CTRL_EXT_I2C_ENA;
   2758 		else
   2759 			reg &= ~CTRL_EXT_I2C_ENA;
   2760 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 	} else if (sc->sc_type < WM_T_82543 ||
   2762 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2763 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2764 			aprint_error_dev(sc->sc_dev,
   2765 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2767 		}
   2768 	} else {
   2769 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2770 			aprint_error_dev(sc->sc_dev,
   2771 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2772 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2773 		}
   2774 	}
   2775 
   2776 	if (sc->sc_type >= WM_T_PCH2)
   2777 		sc->sc_flags |= WM_F_EEE;
   2778 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2779 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2780 		/* XXX: Need special handling for I354. (not yet) */
   2781 		if (sc->sc_type != WM_T_I354)
   2782 			sc->sc_flags |= WM_F_EEE;
   2783 	}
   2784 
   2785 	/* Set device properties (macflags) */
   2786 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2787 
   2788 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2789 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
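	/*
	 * For reference, snprintb(3) expands sc_flags through the WM_FLAGS
	 * format string, so the line printed above reads something like
	 * "wm0: 0x...<HAS_MII,ATTACHED,...>" (flag names illustrative).
	 */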
   2790 
   2791 	/* Initialize the media structures accordingly. */
   2792 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2793 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2794 	else
   2795 		wm_tbi_mediainit(sc); /* All others */
   2796 
   2797 	ifp = &sc->sc_ethercom.ec_if;
   2798 	xname = device_xname(sc->sc_dev);
   2799 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2800 	ifp->if_softc = sc;
   2801 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2802 #ifdef WM_MPSAFE
   2803 	ifp->if_extflags = IFEF_MPSAFE;
   2804 #endif
   2805 	ifp->if_ioctl = wm_ioctl;
   2806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2807 		ifp->if_start = wm_nq_start;
   2808 		/*
    2809 		 * When there is only one CPU and the controller can use
    2810 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2811 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2812 		 * other for link status changes. In this situation,
    2813 		 * wm_nq_transmit() is disadvantageous because of the
    2814 		 * wm_select_txqueue() and pcq(9) overhead.
   2815 		 */
   2816 		if (wm_is_using_multiqueue(sc))
   2817 			ifp->if_transmit = wm_nq_transmit;
   2818 	} else {
   2819 		ifp->if_start = wm_start;
   2820 		/*
    2821 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2822 		 */
   2823 		if (wm_is_using_multiqueue(sc))
   2824 			ifp->if_transmit = wm_transmit;
   2825 	}
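	/*
	 * For illustration, the multiqueue if_transmit path referred to
	 * above follows a pattern like this sketch (schematic, not the
	 * driver's literal code; wm_select_txqueue() and the pcq(9) API are
	 * real, the queue member names are placeholders):
	 *
	 *	int qid = wm_select_txqueue(ifp, m);	/\* pick a Tx queue *\/
	 *	struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
	 *	if (pcq_put(txq->txq_interq, m))	/\* lockless enqueue *\/
	 *		softint_schedule(txq_si);	/\* defer the real Tx *\/
	 *	else
	 *		m_freem(m);			/\* queue full: drop *\/
	 *
	 * The per-packet queue selection and pcq overhead is what the
	 * single-CPU MSI-X case above avoids by sticking to if_start.
	 */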
    2826 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2827 	ifp->if_init = wm_init;
   2828 	ifp->if_stop = wm_stop;
   2829 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2830 	IFQ_SET_READY(&ifp->if_snd);
   2831 
   2832 	/* Check for jumbo frame */
   2833 	switch (sc->sc_type) {
   2834 	case WM_T_82573:
   2835 		/* XXX limited to 9234 if ASPM is disabled */
   2836 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2837 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2838 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2839 		break;
   2840 	case WM_T_82571:
   2841 	case WM_T_82572:
   2842 	case WM_T_82574:
   2843 	case WM_T_82583:
   2844 	case WM_T_82575:
   2845 	case WM_T_82576:
   2846 	case WM_T_82580:
   2847 	case WM_T_I350:
   2848 	case WM_T_I354:
   2849 	case WM_T_I210:
   2850 	case WM_T_I211:
   2851 	case WM_T_80003:
   2852 	case WM_T_ICH9:
   2853 	case WM_T_ICH10:
   2854 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2855 	case WM_T_PCH_LPT:
   2856 	case WM_T_PCH_SPT:
   2857 	case WM_T_PCH_CNP:
   2858 		/* XXX limited to 9234 */
   2859 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2860 		break;
   2861 	case WM_T_PCH:
   2862 		/* XXX limited to 4096 */
   2863 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2864 		break;
   2865 	case WM_T_82542_2_0:
   2866 	case WM_T_82542_2_1:
   2867 	case WM_T_ICH8:
   2868 		/* No support for jumbo frame */
   2869 		break;
   2870 	default:
   2871 		/* ETHER_MAX_LEN_JUMBO */
   2872 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2873 		break;
   2874 	}
   2875 
    2876 	/* If we're an i82543 or greater, we can support VLANs. */
   2877 	if (sc->sc_type >= WM_T_82543)
   2878 		sc->sc_ethercom.ec_capabilities |=
   2879 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2880 
   2881 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2882 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2883 
   2884 	/*
    2885 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
   2886 	 * on i82543 and later.
   2887 	 */
   2888 	if (sc->sc_type >= WM_T_82543) {
   2889 		ifp->if_capabilities |=
   2890 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2891 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2892 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2893 		    IFCAP_CSUM_TCPv6_Tx |
   2894 		    IFCAP_CSUM_UDPv6_Tx;
   2895 	}
   2896 
   2897 	/*
   2898 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2899 	 *
   2900 	 *	82541GI (8086:1076) ... no
   2901 	 *	82572EI (8086:10b9) ... yes
   2902 	 */
   2903 	if (sc->sc_type >= WM_T_82571) {
   2904 		ifp->if_capabilities |=
   2905 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2906 	}
   2907 
   2908 	/*
    2909 	 * If we're an i82544 or greater (except i82547), we can do
   2910 	 * TCP segmentation offload.
   2911 	 */
   2912 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2913 		ifp->if_capabilities |= IFCAP_TSOv4;
   2914 	}
   2915 
   2916 	if (sc->sc_type >= WM_T_82571) {
   2917 		ifp->if_capabilities |= IFCAP_TSOv6;
   2918 	}
   2919 
   2920 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2921 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2922 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2923 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2924 
   2925 #ifdef WM_MPSAFE
   2926 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2927 #else
   2928 	sc->sc_core_lock = NULL;
   2929 #endif
   2930 
   2931 	/* Attach the interface. */
   2932 	error = if_initialize(ifp);
   2933 	if (error != 0) {
   2934 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2935 		    error);
   2936 		return; /* Error */
   2937 	}
   2938 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2939 	ether_ifattach(ifp, enaddr);
   2940 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2941 	if_register(ifp);
   2942 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2943 	    RND_FLAG_DEFAULT);
   2944 
   2945 #ifdef WM_EVENT_COUNTERS
   2946 	/* Attach event counters. */
   2947 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2948 	    NULL, xname, "linkintr");
   2949 
   2950 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2951 	    NULL, xname, "tx_xoff");
   2952 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2953 	    NULL, xname, "tx_xon");
   2954 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2955 	    NULL, xname, "rx_xoff");
   2956 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2957 	    NULL, xname, "rx_xon");
   2958 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2959 	    NULL, xname, "rx_macctl");
   2960 #endif /* WM_EVENT_COUNTERS */
   2961 
   2962 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2963 		pmf_class_network_register(self, ifp);
   2964 	else
   2965 		aprint_error_dev(self, "couldn't establish power handler\n");
   2966 
   2967 	sc->sc_flags |= WM_F_ATTACHED;
   2968 out:
   2969 	return;
   2970 }
   2971 
   2972 /* The detach function (ca_detach) */
   2973 static int
   2974 wm_detach(device_t self, int flags __unused)
   2975 {
   2976 	struct wm_softc *sc = device_private(self);
   2977 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2978 	int i;
   2979 
   2980 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2981 		return 0;
   2982 
   2983 	/* Stop the interface. Callouts are stopped in it. */
   2984 	wm_stop(ifp, 1);
   2985 
   2986 	pmf_device_deregister(self);
   2987 
   2988 #ifdef WM_EVENT_COUNTERS
   2989 	evcnt_detach(&sc->sc_ev_linkintr);
   2990 
   2991 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2992 	evcnt_detach(&sc->sc_ev_tx_xon);
   2993 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2994 	evcnt_detach(&sc->sc_ev_rx_xon);
   2995 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2996 #endif /* WM_EVENT_COUNTERS */
   2997 
   2998 	/* Tell the firmware about the release */
   2999 	WM_CORE_LOCK(sc);
   3000 	wm_release_manageability(sc);
   3001 	wm_release_hw_control(sc);
   3002 	wm_enable_wakeup(sc);
   3003 	WM_CORE_UNLOCK(sc);
   3004 
   3005 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3006 
   3007 	/* Delete all remaining media. */
   3008 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3009 
   3010 	ether_ifdetach(ifp);
   3011 	if_detach(ifp);
   3012 	if_percpuq_destroy(sc->sc_ipq);
   3013 
   3014 	/* Unload RX dmamaps and free mbufs */
   3015 	for (i = 0; i < sc->sc_nqueues; i++) {
   3016 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3017 		mutex_enter(rxq->rxq_lock);
   3018 		wm_rxdrain(rxq);
   3019 		mutex_exit(rxq->rxq_lock);
   3020 	}
   3021 	/* Must unlock here */
   3022 
   3023 	/* Disestablish the interrupt handler */
   3024 	for (i = 0; i < sc->sc_nintrs; i++) {
   3025 		if (sc->sc_ihs[i] != NULL) {
   3026 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3027 			sc->sc_ihs[i] = NULL;
   3028 		}
   3029 	}
   3030 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3031 
   3032 	wm_free_txrx_queues(sc);
   3033 
   3034 	/* Unmap the registers */
   3035 	if (sc->sc_ss) {
   3036 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3037 		sc->sc_ss = 0;
   3038 	}
   3039 	if (sc->sc_ios) {
   3040 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3041 		sc->sc_ios = 0;
   3042 	}
   3043 	if (sc->sc_flashs) {
   3044 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3045 		sc->sc_flashs = 0;
   3046 	}
   3047 
   3048 	if (sc->sc_core_lock)
   3049 		mutex_obj_free(sc->sc_core_lock);
   3050 	if (sc->sc_ich_phymtx)
   3051 		mutex_obj_free(sc->sc_ich_phymtx);
   3052 	if (sc->sc_ich_nvmmtx)
   3053 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3054 
   3055 	return 0;
   3056 }
   3057 
   3058 static bool
   3059 wm_suspend(device_t self, const pmf_qual_t *qual)
   3060 {
   3061 	struct wm_softc *sc = device_private(self);
   3062 
   3063 	wm_release_manageability(sc);
   3064 	wm_release_hw_control(sc);
   3065 	wm_enable_wakeup(sc);
   3066 
   3067 	return true;
   3068 }
   3069 
   3070 static bool
   3071 wm_resume(device_t self, const pmf_qual_t *qual)
   3072 {
   3073 	struct wm_softc *sc = device_private(self);
   3074 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3075 	pcireg_t reg;
   3076 	char buf[256];
   3077 
   3078 	reg = CSR_READ(sc, WMREG_WUS);
   3079 	if (reg != 0) {
   3080 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3081 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3082 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3083 	}
   3084 
   3085 	if (sc->sc_type >= WM_T_PCH2)
   3086 		wm_resume_workarounds_pchlan(sc);
   3087 	if ((ifp->if_flags & IFF_UP) == 0) {
   3088 		wm_reset(sc);
   3089 		/* Non-AMT based hardware can now take control from firmware */
   3090 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3091 			wm_get_hw_control(sc);
   3092 		wm_init_manageability(sc);
   3093 	} else {
   3094 		/*
   3095 		 * We called pmf_class_network_register(), so if_init() is
   3096 		 * automatically called when IFF_UP. wm_reset(),
   3097 		 * wm_get_hw_control() and wm_init_manageability() are called
   3098 		 * via wm_init().
   3099 		 */
   3100 	}
   3101 
   3102 	return true;
   3103 }
   3104 
   3105 /*
   3106  * wm_watchdog:		[ifnet interface function]
   3107  *
   3108  *	Watchdog timer handler.
   3109  */
   3110 static void
   3111 wm_watchdog(struct ifnet *ifp)
   3112 {
   3113 	int qid;
   3114 	struct wm_softc *sc = ifp->if_softc;
    3115 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576), so 16 bits suffice. */
   3116 
   3117 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3118 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3119 
   3120 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3121 	}
   3122 
   3123 	/*
    3124 	 * If any queue hung up, reset the interface.
   3125 	 */
   3126 	if (hang_queue != 0) {
   3127 		(void) wm_init(ifp);
   3128 
    3129 		/*
    3130 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
    3131 		 * system, still calls ifp->if_start(), so try to get more
    3132 		 * packets going.
    3133 		 */
   3134 		ifp->if_start(ifp);
   3135 	}
   3136 }
   3137 
   3138 
   3139 static void
   3140 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3141 {
   3142 
   3143 	mutex_enter(txq->txq_lock);
   3144 	if (txq->txq_sending &&
   3145 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3146 		wm_watchdog_txq_locked(ifp, txq, hang);
   3147 	}
   3148 	mutex_exit(txq->txq_lock);
   3149 }
   3150 
   3151 static void
   3152 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3153     uint16_t *hang)
   3154 {
   3155 	struct wm_softc *sc = ifp->if_softc;
   3156 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3157 
   3158 	KASSERT(mutex_owned(txq->txq_lock));
   3159 
   3160 	/*
   3161 	 * Since we're using delayed interrupts, sweep up
   3162 	 * before we report an error.
   3163 	 */
   3164 	wm_txeof(txq, UINT_MAX);
   3165 
   3166 	if (txq->txq_sending)
   3167 		*hang |= __BIT(wmq->wmq_id);
   3168 
   3169 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3170 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3171 		    device_xname(sc->sc_dev));
   3172 	} else {
   3173 #ifdef WM_DEBUG
   3174 		int i, j;
   3175 		struct wm_txsoft *txs;
   3176 #endif
   3177 		log(LOG_ERR,
   3178 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3179 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3180 		    txq->txq_next);
   3181 		ifp->if_oerrors++;
   3182 #ifdef WM_DEBUG
   3183 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3184 		    i = WM_NEXTTXS(txq, i)) {
   3185 		    txs = &txq->txq_soft[i];
   3186 		    printf("txs %d tx %d -> %d\n",
   3187 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3188 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3189 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3190 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3191 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3192 				    printf("\t %#08x%08x\n",
   3193 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3194 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3195 			    } else {
   3196 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3197 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3198 					txq->txq_descs[j].wtx_addr.wa_low);
   3199 				    printf("\t %#04x%02x%02x%08x\n",
   3200 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3201 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3202 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3203 					txq->txq_descs[j].wtx_cmdlen);
   3204 			    }
   3205 			if (j == txs->txs_lastdesc)
   3206 				break;
   3207 			}
   3208 		}
   3209 #endif
   3210 	}
   3211 }
   3212 
   3213 /*
   3214  * wm_tick:
   3215  *
   3216  *	One second timer, used to check link status, sweep up
   3217  *	completed transmit jobs, etc.
   3218  */
   3219 static void
   3220 wm_tick(void *arg)
   3221 {
   3222 	struct wm_softc *sc = arg;
   3223 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3224 #ifndef WM_MPSAFE
   3225 	int s = splnet();
   3226 #endif
   3227 
   3228 	WM_CORE_LOCK(sc);
   3229 
   3230 	if (sc->sc_core_stopping) {
   3231 		WM_CORE_UNLOCK(sc);
   3232 #ifndef WM_MPSAFE
   3233 		splx(s);
   3234 #endif
   3235 		return;
   3236 	}
   3237 
   3238 	if (sc->sc_type >= WM_T_82542_2_1) {
   3239 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3241 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3242 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3243 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3244 	}
   3245 
   3246 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3247 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3248 	    + CSR_READ(sc, WMREG_CRCERRS)
   3249 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3250 	    + CSR_READ(sc, WMREG_SYMERRC)
   3251 	    + CSR_READ(sc, WMREG_RXERRC)
   3252 	    + CSR_READ(sc, WMREG_SEC)
   3253 	    + CSR_READ(sc, WMREG_CEXTERR)
   3254 	    + CSR_READ(sc, WMREG_RLEC);
   3255 	/*
    3256 	 * WMREG_RNBC is incremented when there are no available buffers
    3257 	 * in host memory. It does not count dropped packets, because the
    3258 	 * controller can still receive packets in that case as long as
    3259 	 * there is space in the PHY's FIFO.
    3260 	 *
    3261 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your
    3262 	 * own instead of if_iqdrops (see the sketch below).
   3263 	 */
   3264 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
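	/*
	 * A minimal sketch of such a dedicated counter (sc_ev_rnbc is a
	 * hypothetical field, not part of this driver). At attach time:
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, xname, "rx_no_buffers");
	 *
	 * and here, once a second:
	 *
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */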
   3265 
   3266 	if (sc->sc_flags & WM_F_HAS_MII)
   3267 		mii_tick(&sc->sc_mii);
   3268 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3269 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3270 		wm_serdes_tick(sc);
   3271 	else
   3272 		wm_tbi_tick(sc);
   3273 
   3274 	WM_CORE_UNLOCK(sc);
   3275 
   3276 	wm_watchdog(ifp);
   3277 
   3278 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3279 }
   3280 
   3281 static int
   3282 wm_ifflags_cb(struct ethercom *ec)
   3283 {
   3284 	struct ifnet *ifp = &ec->ec_if;
   3285 	struct wm_softc *sc = ifp->if_softc;
   3286 	int iffchange, ecchange;
   3287 	bool needreset = false;
   3288 	int rc = 0;
   3289 
   3290 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3291 		device_xname(sc->sc_dev), __func__));
   3292 
   3293 	WM_CORE_LOCK(sc);
   3294 
   3295 	/*
   3296 	 * Check for if_flags.
   3297 	 * Main usage is to prevent linkdown when opening bpf.
   3298 	 */
   3299 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3300 	sc->sc_if_flags = ifp->if_flags;
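	/*
	 * The XOR leaves only the bits that actually toggled set. E.g. if
	 * if_flags was 0x8843 (UP|BROADCAST|RUNNING|SIMPLEX|MULTICAST) and
	 * is now 0x8943, then iffchange == 0x0100 (IFF_PROMISC).
	 */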
   3301 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3302 		needreset = true;
   3303 		goto ec;
   3304 	}
   3305 
   3306 	/* iff related updates */
   3307 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3308 		wm_set_filter(sc);
   3309 
   3310 	wm_set_vlan(sc);
   3311 
   3312 ec:
   3313 	/* Check for ec_capenable. */
   3314 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3315 	sc->sc_ec_capenable = ec->ec_capenable;
   3316 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3317 		needreset = true;
   3318 		goto out;
   3319 	}
   3320 
   3321 	/* ec related updates */
   3322 	wm_set_eee(sc);
   3323 
   3324 out:
   3325 	if (needreset)
   3326 		rc = ENETRESET;
   3327 	WM_CORE_UNLOCK(sc);
   3328 
   3329 	return rc;
   3330 }
   3331 
   3332 /*
   3333  * wm_ioctl:		[ifnet interface function]
   3334  *
   3335  *	Handle control requests from the operator.
   3336  */
   3337 static int
   3338 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3339 {
   3340 	struct wm_softc *sc = ifp->if_softc;
   3341 	struct ifreq *ifr = (struct ifreq *) data;
   3342 	struct ifaddr *ifa = (struct ifaddr *)data;
   3343 	struct sockaddr_dl *sdl;
   3344 	int s, error;
   3345 
   3346 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3347 		device_xname(sc->sc_dev), __func__));
   3348 
   3349 #ifndef WM_MPSAFE
   3350 	s = splnet();
   3351 #endif
   3352 	switch (cmd) {
   3353 	case SIOCSIFMEDIA:
   3354 	case SIOCGIFMEDIA:
   3355 		WM_CORE_LOCK(sc);
   3356 		/* Flow control requires full-duplex mode. */
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3358 		    (ifr->ifr_media & IFM_FDX) == 0)
   3359 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3360 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3361 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3362 				/* We can do both TXPAUSE and RXPAUSE. */
   3363 				ifr->ifr_media |=
   3364 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3365 			}
   3366 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3367 		}
   3368 		WM_CORE_UNLOCK(sc);
   3369 #ifdef WM_MPSAFE
   3370 		s = splnet();
   3371 #endif
   3372 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3373 #ifdef WM_MPSAFE
   3374 		splx(s);
   3375 #endif
   3376 		break;
   3377 	case SIOCINITIFADDR:
   3378 		WM_CORE_LOCK(sc);
   3379 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3380 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3381 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3382 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3383 			/* unicast address is first multicast entry */
   3384 			wm_set_filter(sc);
   3385 			error = 0;
   3386 			WM_CORE_UNLOCK(sc);
   3387 			break;
   3388 		}
   3389 		WM_CORE_UNLOCK(sc);
   3390 		/*FALLTHROUGH*/
   3391 	default:
   3392 #ifdef WM_MPSAFE
   3393 		s = splnet();
   3394 #endif
   3395 		/* It may call wm_start, so unlock here */
   3396 		error = ether_ioctl(ifp, cmd, data);
   3397 #ifdef WM_MPSAFE
   3398 		splx(s);
   3399 #endif
   3400 		if (error != ENETRESET)
   3401 			break;
   3402 
   3403 		error = 0;
   3404 
   3405 		if (cmd == SIOCSIFCAP)
   3406 			error = (*ifp->if_init)(ifp);
   3407 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3408 			;
   3409 		else if (ifp->if_flags & IFF_RUNNING) {
   3410 			/*
   3411 			 * Multicast list has changed; set the hardware filter
   3412 			 * accordingly.
   3413 			 */
   3414 			WM_CORE_LOCK(sc);
   3415 			wm_set_filter(sc);
   3416 			WM_CORE_UNLOCK(sc);
   3417 		}
   3418 		break;
   3419 	}
   3420 
   3421 #ifndef WM_MPSAFE
   3422 	splx(s);
   3423 #endif
   3424 	return error;
   3425 }
   3426 
   3427 /* MAC address related */
   3428 
   3429 /*
    3430  * Get the offset of the MAC address and return it.
    3431  * If an error occurs, use offset 0.
   3432  */
   3433 static uint16_t
   3434 wm_check_alt_mac_addr(struct wm_softc *sc)
   3435 {
   3436 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3437 	uint16_t offset = NVM_OFF_MACADDR;
   3438 
   3439 	/* Try to read alternative MAC address pointer */
   3440 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3441 		return 0;
   3442 
    3443 	/* Check whether the pointer is valid. */
   3444 	if ((offset == 0x0000) || (offset == 0xffff))
   3445 		return 0;
   3446 
   3447 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3448 	/*
    3449 	 * Check whether the alternative MAC address is valid. Some cards
    3450 	 * have a non-0xffff pointer but don't actually use one.
    3451 	 *
    3452 	 * A valid unicast address must have the multicast (I/G) bit,
    3453 	 * i.e. bit 0 of the first octet, clear; check that here.
    3454 	 */
   3455 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3456 		if (((myea[0] & 0xff) & 0x01) == 0)
   3457 			return offset; /* Found */
   3458 
   3459 	/* Not found */
   3460 	return 0;
   3461 }
   3462 
   3463 static int
   3464 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3465 {
   3466 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3467 	uint16_t offset = NVM_OFF_MACADDR;
   3468 	int do_invert = 0;
   3469 
   3470 	switch (sc->sc_type) {
   3471 	case WM_T_82580:
   3472 	case WM_T_I350:
   3473 	case WM_T_I354:
   3474 		/* EEPROM Top Level Partitioning */
   3475 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3476 		break;
   3477 	case WM_T_82571:
   3478 	case WM_T_82575:
   3479 	case WM_T_82576:
   3480 	case WM_T_80003:
   3481 	case WM_T_I210:
   3482 	case WM_T_I211:
   3483 		offset = wm_check_alt_mac_addr(sc);
   3484 		if (offset == 0)
   3485 			if ((sc->sc_funcid & 0x01) == 1)
   3486 				do_invert = 1;
   3487 		break;
   3488 	default:
   3489 		if ((sc->sc_funcid & 0x01) == 1)
   3490 			do_invert = 1;
   3491 		break;
   3492 	}
   3493 
   3494 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3495 		goto bad;
   3496 
   3497 	enaddr[0] = myea[0] & 0xff;
   3498 	enaddr[1] = myea[0] >> 8;
   3499 	enaddr[2] = myea[1] & 0xff;
   3500 	enaddr[3] = myea[1] >> 8;
   3501 	enaddr[4] = myea[2] & 0xff;
   3502 	enaddr[5] = myea[2] >> 8;
   3503 
   3504 	/*
   3505 	 * Toggle the LSB of the MAC address on the second port
   3506 	 * of some dual port cards.
   3507 	 */
   3508 	if (do_invert != 0)
   3509 		enaddr[5] ^= 1;
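	/*
	 * E.g. (illustrative addresses): if port 0 reads 00:11:22:33:44:56
	 * from the NVM, port 1 ends up with 00:11:22:33:44:57 here.
	 */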
   3510 
   3511 	return 0;
   3512 
   3513  bad:
   3514 	return -1;
   3515 }
   3516 
   3517 /*
   3518  * wm_set_ral:
   3519  *
   3520  *	Set an entery in the receive address list.
    3521  *	Set an entry in the receive address list.
   3522 static void
   3523 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3524 {
   3525 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3526 	uint32_t wlock_mac;
   3527 	int rv;
   3528 
   3529 	if (enaddr != NULL) {
   3530 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3531 		    (enaddr[3] << 24);
   3532 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3533 		ral_hi |= RAL_AV;
   3534 	} else {
   3535 		ral_lo = 0;
   3536 		ral_hi = 0;
   3537 	}
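	/*
	 * Byte-order example (illustrative address): 00:11:22:33:44:55
	 * packs as ral_lo = 0x33221100 and ral_hi = 0x5544 | RAL_AV,
	 * i.e. the first octet lands in the low byte of RAL.
	 */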
   3538 
   3539 	switch (sc->sc_type) {
   3540 	case WM_T_82542_2_0:
   3541 	case WM_T_82542_2_1:
   3542 	case WM_T_82543:
   3543 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3544 		CSR_WRITE_FLUSH(sc);
   3545 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3546 		CSR_WRITE_FLUSH(sc);
   3547 		break;
   3548 	case WM_T_PCH2:
   3549 	case WM_T_PCH_LPT:
   3550 	case WM_T_PCH_SPT:
   3551 	case WM_T_PCH_CNP:
   3552 		if (idx == 0) {
   3553 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3554 			CSR_WRITE_FLUSH(sc);
   3555 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3556 			CSR_WRITE_FLUSH(sc);
   3557 			return;
   3558 		}
   3559 		if (sc->sc_type != WM_T_PCH2) {
   3560 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3561 			    FWSM_WLOCK_MAC);
   3562 			addrl = WMREG_SHRAL(idx - 1);
   3563 			addrh = WMREG_SHRAH(idx - 1);
   3564 		} else {
   3565 			wlock_mac = 0;
   3566 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3567 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3568 		}
   3569 
   3570 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3571 			rv = wm_get_swflag_ich8lan(sc);
   3572 			if (rv != 0)
   3573 				return;
   3574 			CSR_WRITE(sc, addrl, ral_lo);
   3575 			CSR_WRITE_FLUSH(sc);
   3576 			CSR_WRITE(sc, addrh, ral_hi);
   3577 			CSR_WRITE_FLUSH(sc);
   3578 			wm_put_swflag_ich8lan(sc);
   3579 		}
   3580 
   3581 		break;
   3582 	default:
   3583 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3584 		CSR_WRITE_FLUSH(sc);
   3585 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3586 		CSR_WRITE_FLUSH(sc);
   3587 		break;
   3588 	}
   3589 }
   3590 
   3591 /*
   3592  * wm_mchash:
   3593  *
    3594  *	Compute the hash of the multicast address for the 4096-bit
    3595  *	multicast filter (1024-bit on ICH/PCH variants).
   3596  */
   3597 static uint32_t
   3598 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3599 {
   3600 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3601 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3602 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3603 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3604 	uint32_t hash;
   3605 
   3606 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3607 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3608 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3609 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3610 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3611 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3612 		return (hash & 0x3ff);
   3613 	}
   3614 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3615 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3616 
   3617 	return (hash & 0xfff);
   3618 }
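/*
 * Worked example of the hash above (default filter type,
 * sc_mchash_type == 0): for the multicast address 01:00:5e:00:00:fb,
 * enaddr[4] == 0x00 and enaddr[5] == 0xfb, so
 *
 *	hash = (0x00 >> 4) | (0xfb << 4) = 0xfb0
 *
 * and wm_set_filter() below then sets bit (hash & 0x1f) == 16 in MTA
 * register (hash >> 5) == 125.
 */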
   3619 
    3620 /*
    3621  * wm_rar_count:
    3622  *	Return the number of receive address register (RAR) entries.
    3623  */
   3624 static int
   3625 wm_rar_count(struct wm_softc *sc)
   3626 {
   3627 	int size;
   3628 
   3629 	switch (sc->sc_type) {
   3630 	case WM_T_ICH8:
    3631 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3632 		break;
   3633 	case WM_T_ICH9:
   3634 	case WM_T_ICH10:
   3635 	case WM_T_PCH:
   3636 		size = WM_RAL_TABSIZE_ICH8;
   3637 		break;
   3638 	case WM_T_PCH2:
   3639 		size = WM_RAL_TABSIZE_PCH2;
   3640 		break;
   3641 	case WM_T_PCH_LPT:
   3642 	case WM_T_PCH_SPT:
   3643 	case WM_T_PCH_CNP:
   3644 		size = WM_RAL_TABSIZE_PCH_LPT;
   3645 		break;
   3646 	case WM_T_82575:
   3647 	case WM_T_I210:
   3648 	case WM_T_I211:
   3649 		size = WM_RAL_TABSIZE_82575;
   3650 		break;
   3651 	case WM_T_82576:
   3652 	case WM_T_82580:
   3653 		size = WM_RAL_TABSIZE_82576;
   3654 		break;
   3655 	case WM_T_I350:
   3656 	case WM_T_I354:
   3657 		size = WM_RAL_TABSIZE_I350;
   3658 		break;
   3659 	default:
   3660 		size = WM_RAL_TABSIZE;
   3661 	}
   3662 
   3663 	return size;
   3664 }
   3665 
   3666 /*
   3667  * wm_set_filter:
   3668  *
   3669  *	Set up the receive filter.
   3670  */
   3671 static void
   3672 wm_set_filter(struct wm_softc *sc)
   3673 {
   3674 	struct ethercom *ec = &sc->sc_ethercom;
   3675 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3676 	struct ether_multi *enm;
   3677 	struct ether_multistep step;
   3678 	bus_addr_t mta_reg;
   3679 	uint32_t hash, reg, bit;
   3680 	int i, size, ralmax;
   3681 
   3682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3683 		device_xname(sc->sc_dev), __func__));
   3684 
   3685 	if (sc->sc_type >= WM_T_82544)
   3686 		mta_reg = WMREG_CORDOVA_MTA;
   3687 	else
   3688 		mta_reg = WMREG_MTA;
   3689 
   3690 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3691 
   3692 	if (ifp->if_flags & IFF_BROADCAST)
   3693 		sc->sc_rctl |= RCTL_BAM;
   3694 	if (ifp->if_flags & IFF_PROMISC) {
   3695 		sc->sc_rctl |= RCTL_UPE;
   3696 		goto allmulti;
   3697 	}
   3698 
   3699 	/*
   3700 	 * Set the station address in the first RAL slot, and
   3701 	 * clear the remaining slots.
   3702 	 */
   3703 	size = wm_rar_count(sc);
   3704 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3705 
   3706 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3707 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3708 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3709 		switch (i) {
   3710 		case 0:
   3711 			/* We can use all entries */
   3712 			ralmax = size;
   3713 			break;
   3714 		case 1:
   3715 			/* Only RAR[0] */
   3716 			ralmax = 1;
   3717 			break;
   3718 		default:
   3719 			/* available SHRA + RAR[0] */
   3720 			ralmax = i + 1;
   3721 		}
   3722 	} else
   3723 		ralmax = size;
   3724 	for (i = 1; i < size; i++) {
   3725 		if (i < ralmax)
   3726 			wm_set_ral(sc, NULL, i);
   3727 	}
   3728 
   3729 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3730 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3731 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3732 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3733 		size = WM_ICH8_MC_TABSIZE;
   3734 	else
   3735 		size = WM_MC_TABSIZE;
   3736 	/* Clear out the multicast table. */
   3737 	for (i = 0; i < size; i++) {
   3738 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3739 		CSR_WRITE_FLUSH(sc);
   3740 	}
   3741 
   3742 	ETHER_LOCK(ec);
   3743 	ETHER_FIRST_MULTI(step, ec, enm);
   3744 	while (enm != NULL) {
   3745 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3746 			ETHER_UNLOCK(ec);
   3747 			/*
   3748 			 * We must listen to a range of multicast addresses.
   3749 			 * For now, just accept all multicasts, rather than
   3750 			 * trying to set only those filter bits needed to match
   3751 			 * the range.  (At this time, the only use of address
   3752 			 * ranges is for IP multicast routing, for which the
   3753 			 * range is big enough to require all bits set.)
   3754 			 */
   3755 			goto allmulti;
   3756 		}
   3757 
   3758 		hash = wm_mchash(sc, enm->enm_addrlo);
   3759 
   3760 		reg = (hash >> 5);
   3761 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3762 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3763 		    || (sc->sc_type == WM_T_PCH2)
   3764 		    || (sc->sc_type == WM_T_PCH_LPT)
   3765 		    || (sc->sc_type == WM_T_PCH_SPT)
   3766 		    || (sc->sc_type == WM_T_PCH_CNP))
   3767 			reg &= 0x1f;
   3768 		else
   3769 			reg &= 0x7f;
   3770 		bit = hash & 0x1f;
   3771 
   3772 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3773 		hash |= 1U << bit;
   3774 
   3775 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3776 			/*
    3777 			 * 82544 Errata 9: Certain registers cannot be written
   3778 			 * with particular alignments in PCI-X bus operation
   3779 			 * (FCAH, MTA and VFTA).
   3780 			 */
   3781 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3782 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3783 			CSR_WRITE_FLUSH(sc);
   3784 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3785 			CSR_WRITE_FLUSH(sc);
   3786 		} else {
   3787 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3788 			CSR_WRITE_FLUSH(sc);
   3789 		}
   3790 
   3791 		ETHER_NEXT_MULTI(step, enm);
   3792 	}
   3793 	ETHER_UNLOCK(ec);
   3794 
   3795 	ifp->if_flags &= ~IFF_ALLMULTI;
   3796 	goto setit;
   3797 
   3798  allmulti:
   3799 	ifp->if_flags |= IFF_ALLMULTI;
   3800 	sc->sc_rctl |= RCTL_MPE;
   3801 
   3802  setit:
   3803 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3804 }
   3805 
   3806 /* Reset and init related */
   3807 
   3808 static void
   3809 wm_set_vlan(struct wm_softc *sc)
   3810 {
   3811 
   3812 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3813 		device_xname(sc->sc_dev), __func__));
   3814 
   3815 	/* Deal with VLAN enables. */
   3816 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3817 		sc->sc_ctrl |= CTRL_VME;
   3818 	else
   3819 		sc->sc_ctrl &= ~CTRL_VME;
   3820 
   3821 	/* Write the control registers. */
   3822 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3823 }
   3824 
   3825 static void
   3826 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3827 {
   3828 	uint32_t gcr;
   3829 	pcireg_t ctrl2;
   3830 
   3831 	gcr = CSR_READ(sc, WMREG_GCR);
   3832 
    3833 	/* Only take action if the timeout value is at its default of 0 */
   3834 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3835 		goto out;
   3836 
   3837 	if ((gcr & GCR_CAP_VER2) == 0) {
   3838 		gcr |= GCR_CMPL_TMOUT_10MS;
   3839 		goto out;
   3840 	}
   3841 
   3842 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3843 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3844 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3845 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3846 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3847 
   3848 out:
   3849 	/* Disable completion timeout resend */
   3850 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3851 
   3852 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3853 }
   3854 
   3855 void
   3856 wm_get_auto_rd_done(struct wm_softc *sc)
   3857 {
   3858 	int i;
   3859 
   3860 	/* wait for eeprom to reload */
   3861 	switch (sc->sc_type) {
   3862 	case WM_T_82571:
   3863 	case WM_T_82572:
   3864 	case WM_T_82573:
   3865 	case WM_T_82574:
   3866 	case WM_T_82583:
   3867 	case WM_T_82575:
   3868 	case WM_T_82576:
   3869 	case WM_T_82580:
   3870 	case WM_T_I350:
   3871 	case WM_T_I354:
   3872 	case WM_T_I210:
   3873 	case WM_T_I211:
   3874 	case WM_T_80003:
   3875 	case WM_T_ICH8:
   3876 	case WM_T_ICH9:
   3877 		for (i = 0; i < 10; i++) {
   3878 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3879 				break;
   3880 			delay(1000);
   3881 		}
   3882 		if (i == 10) {
   3883 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3884 			    "complete\n", device_xname(sc->sc_dev));
   3885 		}
   3886 		break;
   3887 	default:
   3888 		break;
   3889 	}
   3890 }
   3891 
   3892 void
   3893 wm_lan_init_done(struct wm_softc *sc)
   3894 {
   3895 	uint32_t reg = 0;
   3896 	int i;
   3897 
   3898 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3899 		device_xname(sc->sc_dev), __func__));
   3900 
   3901 	/* Wait for eeprom to reload */
   3902 	switch (sc->sc_type) {
   3903 	case WM_T_ICH10:
   3904 	case WM_T_PCH:
   3905 	case WM_T_PCH2:
   3906 	case WM_T_PCH_LPT:
   3907 	case WM_T_PCH_SPT:
   3908 	case WM_T_PCH_CNP:
   3909 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3910 			reg = CSR_READ(sc, WMREG_STATUS);
   3911 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3912 				break;
   3913 			delay(100);
   3914 		}
   3915 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3916 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3917 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3918 		}
   3919 		break;
   3920 	default:
   3921 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3922 		    __func__);
   3923 		break;
   3924 	}
   3925 
   3926 	reg &= ~STATUS_LAN_INIT_DONE;
   3927 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3928 }
   3929 
   3930 void
   3931 wm_get_cfg_done(struct wm_softc *sc)
   3932 {
   3933 	int mask;
   3934 	uint32_t reg;
   3935 	int i;
   3936 
   3937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3938 		device_xname(sc->sc_dev), __func__));
   3939 
   3940 	/* Wait for eeprom to reload */
   3941 	switch (sc->sc_type) {
   3942 	case WM_T_82542_2_0:
   3943 	case WM_T_82542_2_1:
   3944 		/* null */
   3945 		break;
   3946 	case WM_T_82543:
   3947 	case WM_T_82544:
   3948 	case WM_T_82540:
   3949 	case WM_T_82545:
   3950 	case WM_T_82545_3:
   3951 	case WM_T_82546:
   3952 	case WM_T_82546_3:
   3953 	case WM_T_82541:
   3954 	case WM_T_82541_2:
   3955 	case WM_T_82547:
   3956 	case WM_T_82547_2:
   3957 	case WM_T_82573:
   3958 	case WM_T_82574:
   3959 	case WM_T_82583:
   3960 		/* generic */
   3961 		delay(10*1000);
   3962 		break;
   3963 	case WM_T_80003:
   3964 	case WM_T_82571:
   3965 	case WM_T_82572:
   3966 	case WM_T_82575:
   3967 	case WM_T_82576:
   3968 	case WM_T_82580:
   3969 	case WM_T_I350:
   3970 	case WM_T_I354:
   3971 	case WM_T_I210:
   3972 	case WM_T_I211:
   3973 		if (sc->sc_type == WM_T_82571) {
   3974 			/* Only 82571 shares port 0 */
   3975 			mask = EEMNGCTL_CFGDONE_0;
   3976 		} else
   3977 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3978 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3979 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3980 				break;
   3981 			delay(1000);
   3982 		}
   3983 		if (i >= WM_PHY_CFG_TIMEOUT)
   3984 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3985 				device_xname(sc->sc_dev), __func__));
   3986 		break;
   3987 	case WM_T_ICH8:
   3988 	case WM_T_ICH9:
   3989 	case WM_T_ICH10:
   3990 	case WM_T_PCH:
   3991 	case WM_T_PCH2:
   3992 	case WM_T_PCH_LPT:
   3993 	case WM_T_PCH_SPT:
   3994 	case WM_T_PCH_CNP:
   3995 		delay(10*1000);
   3996 		if (sc->sc_type >= WM_T_ICH10)
   3997 			wm_lan_init_done(sc);
   3998 		else
   3999 			wm_get_auto_rd_done(sc);
   4000 
   4001 		/* Clear PHY Reset Asserted bit */
   4002 		reg = CSR_READ(sc, WMREG_STATUS);
   4003 		if ((reg & STATUS_PHYRA) != 0)
   4004 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4005 		break;
   4006 	default:
   4007 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4008 		    __func__);
   4009 		break;
   4010 	}
   4011 }
   4012 
   4013 int
   4014 wm_phy_post_reset(struct wm_softc *sc)
   4015 {
   4016 	device_t dev = sc->sc_dev;
   4017 	uint16_t reg;
   4018 	int rv = 0;
   4019 
   4020 	/* This function is only for ICH8 and newer. */
   4021 	if (sc->sc_type < WM_T_ICH8)
   4022 		return 0;
   4023 
   4024 	if (wm_phy_resetisblocked(sc)) {
   4025 		/* XXX */
   4026 		device_printf(dev, "PHY is blocked\n");
   4027 		return -1;
   4028 	}
   4029 
   4030 	/* Allow time for h/w to get to quiescent state after reset */
   4031 	delay(10*1000);
   4032 
   4033 	/* Perform any necessary post-reset workarounds */
   4034 	if (sc->sc_type == WM_T_PCH)
   4035 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4036 	else if (sc->sc_type == WM_T_PCH2)
   4037 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4038 	if (rv != 0)
   4039 		return rv;
   4040 
   4041 	/* Clear the host wakeup bit after lcd reset */
   4042 	if (sc->sc_type >= WM_T_PCH) {
   4043 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4044 		reg &= ~BM_WUC_HOST_WU_BIT;
   4045 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4046 	}
   4047 
   4048 	/* Configure the LCD with the extended configuration region in NVM */
   4049 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4050 		return rv;
   4051 
   4052 	/* Configure the LCD with the OEM bits in NVM */
   4053 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4054 
   4055 	if (sc->sc_type == WM_T_PCH2) {
   4056 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4057 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4058 			delay(10 * 1000);
   4059 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4060 		}
   4061 		/* Set EEE LPI Update Timer to 200usec */
   4062 		rv = sc->phy.acquire(sc);
   4063 		if (rv)
   4064 			return rv;
   4065 		rv = wm_write_emi_reg_locked(dev,
   4066 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4067 		sc->phy.release(sc);
   4068 	}
   4069 
   4070 	return rv;
   4071 }
   4072 
   4073 /* Only for PCH and newer */
   4074 static int
   4075 wm_write_smbus_addr(struct wm_softc *sc)
   4076 {
   4077 	uint32_t strap, freq;
   4078 	uint16_t phy_data;
   4079 	int rv;
   4080 
   4081 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4082 		device_xname(sc->sc_dev), __func__));
   4083 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4084 
   4085 	strap = CSR_READ(sc, WMREG_STRAP);
   4086 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4087 
   4088 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4089 	if (rv != 0)
   4090 		return -1;
   4091 
   4092 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4093 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4094 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4095 
   4096 	if (sc->sc_phytype == WMPHY_I217) {
   4097 		/* Restore SMBus frequency */
    4098 		if (freq--) {
   4099 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4100 			    | HV_SMB_ADDR_FREQ_HIGH);
   4101 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4102 			    HV_SMB_ADDR_FREQ_LOW);
   4103 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4104 			    HV_SMB_ADDR_FREQ_HIGH);
   4105 		} else
   4106 			DPRINTF(WM_DEBUG_INIT,
   4107 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4108 				device_xname(sc->sc_dev), __func__));
   4109 	}
   4110 
   4111 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4112 	    phy_data);
   4113 }
   4114 
   4115 static int
   4116 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4117 {
   4118 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4119 	uint16_t phy_page = 0;
   4120 	int rv = 0;
   4121 
   4122 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4123 		device_xname(sc->sc_dev), __func__));
   4124 
   4125 	switch (sc->sc_type) {
   4126 	case WM_T_ICH8:
   4127 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4128 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4129 			return 0;
   4130 
   4131 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4132 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4133 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4134 			break;
   4135 		}
   4136 		/* FALLTHROUGH */
   4137 	case WM_T_PCH:
   4138 	case WM_T_PCH2:
   4139 	case WM_T_PCH_LPT:
   4140 	case WM_T_PCH_SPT:
   4141 	case WM_T_PCH_CNP:
   4142 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4143 		break;
   4144 	default:
   4145 		return 0;
   4146 	}
   4147 
   4148 	if ((rv = sc->phy.acquire(sc)) != 0)
   4149 		return rv;
   4150 
   4151 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4152 	if ((reg & sw_cfg_mask) == 0)
   4153 		goto release;
   4154 
   4155 	/*
   4156 	 * Make sure HW does not configure LCD from PHY extended configuration
   4157 	 * before SW configuration
   4158 	 */
   4159 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4160 	if ((sc->sc_type < WM_T_PCH2)
   4161 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4162 		goto release;
   4163 
   4164 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4165 		device_xname(sc->sc_dev), __func__));
    4166 	/* The extended config pointer is in DWORDs; convert to words. */
   4167 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4168 
   4169 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4170 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4171 	if (cnf_size == 0)
   4172 		goto release;
   4173 
   4174 	if (((sc->sc_type == WM_T_PCH)
   4175 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4176 	    || (sc->sc_type > WM_T_PCH)) {
   4177 		/*
   4178 		 * HW configures the SMBus address and LEDs when the OEM and
   4179 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4180 		 * are cleared, SW will configure them instead.
   4181 		 */
   4182 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4183 			device_xname(sc->sc_dev), __func__));
   4184 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4185 			goto release;
   4186 
   4187 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4188 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4189 		    (uint16_t)reg);
   4190 		if (rv != 0)
   4191 			goto release;
   4192 	}
   4193 
   4194 	/* Configure LCD from extended configuration region. */
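	/*
	 * The region consumed by the loop below is laid out in NVM as
	 * cnf_size pairs of 16-bit words, data word first:
	 *
	 *	word_addr + 0: data 0		word_addr + 1: PHY reg addr 0
	 *	word_addr + 2: data 1		word_addr + 3: PHY reg addr 1
	 *	...
	 *
	 * MII_IGPHY_PAGE_SELECT entries update phy_page for the writes
	 * that follow them.
	 */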
   4195 	for (i = 0; i < cnf_size; i++) {
   4196 		uint16_t reg_data, reg_addr;
   4197 
   4198 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4199 			goto release;
   4200 
    4201 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4202 			goto release;
   4203 
   4204 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4205 			phy_page = reg_data;
   4206 
   4207 		reg_addr &= IGPHY_MAXREGADDR;
   4208 		reg_addr |= phy_page;
   4209 
   4210 		KASSERT(sc->phy.writereg_locked != NULL);
   4211 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4212 		    reg_data);
   4213 	}
   4214 
   4215 release:
   4216 	sc->phy.release(sc);
   4217 	return rv;
   4218 }
   4219 
   4220 /*
   4221  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4222  *  @sc:       pointer to the HW structure
   4223  *  @d0_state: boolean if entering d0 or d3 device state
   4224  *
   4225  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4226  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4227  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4228  */
   4229 int
   4230 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4231 {
   4232 	uint32_t mac_reg;
   4233 	uint16_t oem_reg;
   4234 	int rv;
   4235 
   4236 	if (sc->sc_type < WM_T_PCH)
   4237 		return 0;
   4238 
   4239 	rv = sc->phy.acquire(sc);
   4240 	if (rv != 0)
   4241 		return rv;
   4242 
   4243 	if (sc->sc_type == WM_T_PCH) {
   4244 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4245 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4246 			goto release;
   4247 	}
   4248 
   4249 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4250 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4251 		goto release;
   4252 
   4253 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4254 
   4255 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4256 	if (rv != 0)
   4257 		goto release;
   4258 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4259 
   4260 	if (d0_state) {
   4261 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4262 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4263 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4264 			oem_reg |= HV_OEM_BITS_LPLU;
   4265 	} else {
   4266 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4267 		    != 0)
   4268 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4269 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4270 		    != 0)
   4271 			oem_reg |= HV_OEM_BITS_LPLU;
   4272 	}
   4273 
   4274 	/* Set Restart auto-neg to activate the bits */
   4275 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4276 	    && (wm_phy_resetisblocked(sc) == false))
   4277 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4278 
   4279 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4280 
   4281 release:
   4282 	sc->phy.release(sc);
   4283 
   4284 	return rv;
   4285 }
   4286 
   4287 /* Init hardware bits */
   4288 void
   4289 wm_initialize_hardware_bits(struct wm_softc *sc)
   4290 {
   4291 	uint32_t tarc0, tarc1, reg;
   4292 
   4293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4294 		device_xname(sc->sc_dev), __func__));
   4295 
   4296 	/* For 82571 variant, 80003 and ICHs */
   4297 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4298 	    || (sc->sc_type >= WM_T_80003)) {
   4299 
   4300 		/* Transmit Descriptor Control 0 */
   4301 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4302 		reg |= TXDCTL_COUNT_DESC;
   4303 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4304 
   4305 		/* Transmit Descriptor Control 1 */
   4306 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4307 		reg |= TXDCTL_COUNT_DESC;
   4308 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4309 
   4310 		/* TARC0 */
   4311 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4312 		switch (sc->sc_type) {
   4313 		case WM_T_82571:
   4314 		case WM_T_82572:
   4315 		case WM_T_82573:
   4316 		case WM_T_82574:
   4317 		case WM_T_82583:
   4318 		case WM_T_80003:
   4319 			/* Clear bits 30..27 */
   4320 			tarc0 &= ~__BITS(30, 27);
   4321 			break;
   4322 		default:
   4323 			break;
   4324 		}
   4325 
   4326 		switch (sc->sc_type) {
   4327 		case WM_T_82571:
   4328 		case WM_T_82572:
   4329 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4330 
   4331 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4332 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4333 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4334 			/* 8257[12] Errata No.7 */
    4335 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4336 
   4337 			/* TARC1 bit 28 */
   4338 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4339 				tarc1 &= ~__BIT(28);
   4340 			else
   4341 				tarc1 |= __BIT(28);
   4342 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4343 
   4344 			/*
   4345 			 * 8257[12] Errata No.13
    4346 			 * Disable Dynamic Clock Gating.
   4347 			 */
   4348 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4349 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4350 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4351 			break;
   4352 		case WM_T_82573:
   4353 		case WM_T_82574:
   4354 		case WM_T_82583:
   4355 			if ((sc->sc_type == WM_T_82574)
   4356 			    || (sc->sc_type == WM_T_82583))
   4357 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4358 
   4359 			/* Extended Device Control */
   4360 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4361 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4362 			reg |= __BIT(22);	/* Set bit 22 */
   4363 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4364 
   4365 			/* Device Control */
   4366 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4367 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4368 
   4369 			/* PCIe Control Register */
   4370 			/*
   4371 			 * 82573 Errata (unknown).
   4372 			 *
   4373 			 * 82574 Errata 25 and 82583 Errata 12
   4374 			 * "Dropped Rx Packets":
    4375 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4376 			 */
   4377 			reg = CSR_READ(sc, WMREG_GCR);
   4378 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4379 			CSR_WRITE(sc, WMREG_GCR, reg);
   4380 
   4381 			if ((sc->sc_type == WM_T_82574)
   4382 			    || (sc->sc_type == WM_T_82583)) {
   4383 				/*
   4384 				 * Document says this bit must be set for
   4385 				 * proper operation.
   4386 				 */
   4387 				reg = CSR_READ(sc, WMREG_GCR);
   4388 				reg |= __BIT(22);
   4389 				CSR_WRITE(sc, WMREG_GCR, reg);
   4390 
   4391 				/*
    4392 				 * Apply a workaround for a hardware erratum
    4393 				 * documented in the errata docs: unreliable
    4394 				 * PCIe completions can occur, particularly
    4395 				 * with ASPM enabled. Without the fix, the
    4396 				 * issue can cause Tx timeouts.
   4398 				 */
   4399 				reg = CSR_READ(sc, WMREG_GCR2);
   4400 				reg |= __BIT(0);
   4401 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4402 			}
   4403 			break;
   4404 		case WM_T_80003:
   4405 			/* TARC0 */
   4406 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4407 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4408 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4409 
   4410 			/* TARC1 bit 28 */
   4411 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4412 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4413 				tarc1 &= ~__BIT(28);
   4414 			else
   4415 				tarc1 |= __BIT(28);
   4416 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4417 			break;
   4418 		case WM_T_ICH8:
   4419 		case WM_T_ICH9:
   4420 		case WM_T_ICH10:
   4421 		case WM_T_PCH:
   4422 		case WM_T_PCH2:
   4423 		case WM_T_PCH_LPT:
   4424 		case WM_T_PCH_SPT:
   4425 		case WM_T_PCH_CNP:
   4426 			/* TARC0 */
   4427 			if (sc->sc_type == WM_T_ICH8) {
   4428 				/* Set TARC0 bits 29 and 28 */
   4429 				tarc0 |= __BITS(29, 28);
   4430 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4431 				tarc0 |= __BIT(29);
   4432 				/*
    4433 				 * Drop bit 28, following Linux. See the
    4434 				 * I218/I219 spec update, "5. Buffer Overrun
    4435 				 * While the I219 is Processing DMA
    4436 				 * Transactions".
   4437 				 */
   4438 				tarc0 &= ~__BIT(28);
   4439 			}
   4440 			/* Set TARC0 bits 23,24,26,27 */
   4441 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4442 
   4443 			/* CTRL_EXT */
   4444 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4445 			reg |= __BIT(22);	/* Set bit 22 */
   4446 			/*
   4447 			 * Enable PHY low-power state when MAC is at D3
   4448 			 * w/o WoL
   4449 			 */
   4450 			if (sc->sc_type >= WM_T_PCH)
   4451 				reg |= CTRL_EXT_PHYPDEN;
   4452 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4453 
   4454 			/* TARC1 */
   4455 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4456 			/* bit 28 */
   4457 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4458 				tarc1 &= ~__BIT(28);
   4459 			else
   4460 				tarc1 |= __BIT(28);
   4461 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4462 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4463 
   4464 			/* Device Status */
   4465 			if (sc->sc_type == WM_T_ICH8) {
   4466 				reg = CSR_READ(sc, WMREG_STATUS);
   4467 				reg &= ~__BIT(31);
   4468 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4469 
   4470 			}
   4471 
   4472 			/* IOSFPC */
   4473 			if (sc->sc_type == WM_T_PCH_SPT) {
   4474 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4475 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4476 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4477 			}
   4478 			/*
    4479 			 * To work around a descriptor data corruption issue
    4480 			 * seen with NFSv2 UDP traffic, just disable the NFS
    4481 			 * filtering capability.
   4482 			 */
   4483 			reg = CSR_READ(sc, WMREG_RFCTL);
   4484 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4485 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4486 			break;
   4487 		default:
   4488 			break;
   4489 		}
   4490 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4491 
   4492 		switch (sc->sc_type) {
   4493 		/*
   4494 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4495 		 * Avoid RSS Hash Value bug.
   4496 		 */
   4497 		case WM_T_82571:
   4498 		case WM_T_82572:
   4499 		case WM_T_82573:
   4500 		case WM_T_80003:
   4501 		case WM_T_ICH8:
   4502 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4504 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4505 			break;
   4506 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   4508 			reg = CSR_READ(sc, WMREG_RFCTL);
   4509 			reg |= WMREG_RFCTL_EXSTEN;
   4510 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4511 			break;
   4512 		default:
   4513 			break;
   4514 		}
   4515 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4516 		/*
   4517 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4518 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4519 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4520 		 * Correctly by the Device"
   4521 		 *
   4522 		 * I354(C2000) Errata AVR53:
   4523 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4524 		 * Hang"
   4525 		 */
   4526 		reg = CSR_READ(sc, WMREG_RFCTL);
   4527 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4528 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4529 	}
   4530 }
   4531 
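/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the raw RXPBS register value into a packet buffer size
 *	using the 82580 lookup table. Out-of-range values yield 0.
 */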
   4532 static uint32_t
   4533 wm_rxpbs_adjust_82580(uint32_t val)
   4534 {
   4535 	uint32_t rv = 0;
   4536 
   4537 	if (val < __arraycount(wm_82580_rxpbs_table))
   4538 		rv = wm_82580_rxpbs_table[val];
   4539 
   4540 	return rv;
   4541 }
   4542 
   4543 /*
   4544  * wm_reset_phy:
   4545  *
 *	Generic PHY reset function.
   4547  *	Same as e1000_phy_hw_reset_generic()
   4548  */
   4549 static int
   4550 wm_reset_phy(struct wm_softc *sc)
   4551 {
   4552 	uint32_t reg;
   4553 
   4554 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4555 		device_xname(sc->sc_dev), __func__));
   4556 	if (wm_phy_resetisblocked(sc))
   4557 		return -1;
   4558 
   4559 	sc->phy.acquire(sc);
   4560 
   4561 	reg = CSR_READ(sc, WMREG_CTRL);
   4562 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4563 	CSR_WRITE_FLUSH(sc);
   4564 
   4565 	delay(sc->phy.reset_delay_us);
   4566 
   4567 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4568 	CSR_WRITE_FLUSH(sc);
   4569 
   4570 	delay(150);
   4571 
   4572 	sc->phy.release(sc);
   4573 
   4574 	wm_get_cfg_done(sc);
   4575 	wm_phy_post_reset(sc);
   4576 
   4577 	return 0;
   4578 }
   4579 
   4580 /*
 * Only used by WM_T_PCH_SPT, which does not use multiqueue, so it is
 * enough to check sc->sc_queue[0] only.
   4583  */
   4584 static void
   4585 wm_flush_desc_rings(struct wm_softc *sc)
   4586 {
   4587 	pcireg_t preg;
   4588 	uint32_t reg;
   4589 	struct wm_txqueue *txq;
   4590 	wiseman_txdesc_t *txd;
   4591 	int nexttx;
   4592 	uint32_t rctl;
   4593 
   4594 	/* First, disable MULR fix in FEXTNVM11 */
   4595 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4596 	reg |= FEXTNVM11_DIS_MULRFIX;
   4597 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4598 
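	/*
	 * Nothing to do unless the hardware has requested a flush and
	 * the TX ring has been set up.
	 */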
   4599 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4600 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4601 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4602 		return;
   4603 
   4604 	/* TX */
   4605 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4606 	    device_xname(sc->sc_dev), preg, reg);
   4607 	reg = CSR_READ(sc, WMREG_TCTL);
   4608 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4609 
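	/*
	 * Queue a single dummy descriptor (insert-FCS, 512-byte length)
	 * and advance TDT past it so that the hardware drains the ring.
	 */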
   4610 	txq = &sc->sc_queue[0].wmq_txq;
   4611 	nexttx = txq->txq_next;
   4612 	txd = &txq->txq_descs[nexttx];
   4613 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4614 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4615 	txd->wtx_fields.wtxu_status = 0;
   4616 	txd->wtx_fields.wtxu_options = 0;
   4617 	txd->wtx_fields.wtxu_vlan = 0;
   4618 
   4619 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4620 	    BUS_SPACE_BARRIER_WRITE);
   4621 
   4622 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4623 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4624 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4625 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4626 	delay(250);
   4627 
   4628 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4629 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4630 		return;
   4631 
   4632 	/* RX */
   4633 	printf("%s: Need RX flush (reg = %08x)\n",
   4634 	    device_xname(sc->sc_dev), preg);
   4635 	rctl = CSR_READ(sc, WMREG_RCTL);
   4636 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4637 	CSR_WRITE_FLUSH(sc);
   4638 	delay(150);
   4639 
   4640 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4641 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4642 	reg &= 0xffffc000;
   4643 	/*
   4644 	 * update thresholds: prefetch threshold to 31, host threshold
   4645 	 * to 1 and make sure the granularity is "descriptors" and not
   4646 	 * "cache lines"
   4647 	 */
   4648 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4649 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4650 
   4651 	/*
   4652 	 * momentarily enable the RX ring for the changes to take
   4653 	 * effect
   4654 	 */
   4655 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4656 	CSR_WRITE_FLUSH(sc);
   4657 	delay(150);
   4658 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4659 }
   4660 
   4661 /*
   4662  * wm_reset:
   4663  *
 *	Reset the chip to a known state.
   4665  */
   4666 static void
   4667 wm_reset(struct wm_softc *sc)
   4668 {
   4669 	int phy_reset = 0;
   4670 	int i, error = 0;
   4671 	uint32_t reg;
   4672 	uint16_t kmreg;
   4673 	int rv;
   4674 
   4675 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4676 		device_xname(sc->sc_dev), __func__));
   4677 	KASSERT(sc->sc_type != 0);
   4678 
   4679 	/*
   4680 	 * Allocate on-chip memory according to the MTU size.
   4681 	 * The Packet Buffer Allocation register must be written
   4682 	 * before the chip is reset.
   4683 	 */
   4684 	switch (sc->sc_type) {
   4685 	case WM_T_82547:
   4686 	case WM_T_82547_2:
   4687 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4688 		    PBA_22K : PBA_30K;
   4689 		for (i = 0; i < sc->sc_nqueues; i++) {
   4690 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4691 			txq->txq_fifo_head = 0;
   4692 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4693 			txq->txq_fifo_size =
   4694 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4695 			txq->txq_fifo_stall = 0;
   4696 		}
   4697 		break;
   4698 	case WM_T_82571:
   4699 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4701 	case WM_T_80003:
   4702 		sc->sc_pba = PBA_32K;
   4703 		break;
   4704 	case WM_T_82573:
   4705 		sc->sc_pba = PBA_12K;
   4706 		break;
   4707 	case WM_T_82574:
   4708 	case WM_T_82583:
   4709 		sc->sc_pba = PBA_20K;
   4710 		break;
   4711 	case WM_T_82576:
   4712 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4713 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4714 		break;
   4715 	case WM_T_82580:
   4716 	case WM_T_I350:
   4717 	case WM_T_I354:
   4718 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4719 		break;
   4720 	case WM_T_I210:
   4721 	case WM_T_I211:
   4722 		sc->sc_pba = PBA_34K;
   4723 		break;
   4724 	case WM_T_ICH8:
   4725 		/* Workaround for a bit corruption issue in FIFO memory */
   4726 		sc->sc_pba = PBA_8K;
   4727 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4728 		break;
   4729 	case WM_T_ICH9:
   4730 	case WM_T_ICH10:
   4731 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4732 		    PBA_14K : PBA_10K;
   4733 		break;
   4734 	case WM_T_PCH:
   4735 	case WM_T_PCH2:	/* XXX 14K? */
   4736 	case WM_T_PCH_LPT:
   4737 	case WM_T_PCH_SPT:
   4738 	case WM_T_PCH_CNP:
   4739 		sc->sc_pba = PBA_26K;
   4740 		break;
   4741 	default:
   4742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4743 		    PBA_40K : PBA_48K;
   4744 		break;
   4745 	}
   4746 	/*
   4747 	 * Only old or non-multiqueue devices have the PBA register
   4748 	 * XXX Need special handling for 82575.
   4749 	 */
   4750 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4751 	    || (sc->sc_type == WM_T_82575))
   4752 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4753 
	/*
	 * Quiesce the PCI-E bus: disable new master accesses and wait
	 * (up to 800 * 100us = 80ms) for pending ones to complete.
	 */
   4755 	if (sc->sc_flags & WM_F_PCIE) {
   4756 		int timeout = 800;
   4757 
   4758 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4759 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4760 
   4761 		while (timeout--) {
   4762 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4763 			    == 0)
   4764 				break;
   4765 			delay(100);
   4766 		}
   4767 		if (timeout == 0)
   4768 			device_printf(sc->sc_dev,
   4769 			    "failed to disable busmastering\n");
   4770 	}
   4771 
	/* Set the PCIe completion timeout for the interface */
   4773 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4774 	    || (sc->sc_type == WM_T_82580)
   4775 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4776 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4777 		wm_set_pcie_completion_timeout(sc);
   4778 
   4779 	/* Clear interrupt */
   4780 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4781 	if (wm_is_using_msix(sc)) {
   4782 		if (sc->sc_type != WM_T_82574) {
   4783 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4784 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4785 		} else
   4786 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4787 	}
   4788 
   4789 	/* Stop the transmit and receive processes. */
   4790 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4791 	sc->sc_rctl &= ~RCTL_EN;
   4792 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4793 	CSR_WRITE_FLUSH(sc);
   4794 
   4795 	/* XXX set_tbi_sbp_82543() */
   4796 
   4797 	delay(10*1000);
   4798 
   4799 	/* Must acquire the MDIO ownership before MAC reset */
   4800 	switch (sc->sc_type) {
   4801 	case WM_T_82573:
   4802 	case WM_T_82574:
   4803 	case WM_T_82583:
   4804 		error = wm_get_hw_semaphore_82573(sc);
   4805 		break;
   4806 	default:
   4807 		break;
   4808 	}
   4809 
   4810 	/*
   4811 	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description of the PHY_RST bit in the CTRL register
	 * in 8254x_GBe_SDM.pdf.
   4814 	 */
   4815 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4816 		CSR_WRITE(sc, WMREG_CTRL,
   4817 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4818 		CSR_WRITE_FLUSH(sc);
   4819 		delay(5000);
   4820 	}
   4821 
   4822 	switch (sc->sc_type) {
   4823 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4824 	case WM_T_82541:
   4825 	case WM_T_82541_2:
   4826 	case WM_T_82547:
   4827 	case WM_T_82547_2:
   4828 		/*
   4829 		 * On some chipsets, a reset through a memory-mapped write
   4830 		 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes a major headache that can be avoided
   4832 		 * by issuing the reset via indirect register writes through
   4833 		 * I/O space.
   4834 		 *
   4835 		 * So, if we successfully mapped the I/O BAR at attach time,
   4836 		 * use that. Otherwise, try our luck with a memory-mapped
   4837 		 * reset.
   4838 		 */
   4839 		if (sc->sc_flags & WM_F_IOH_VALID)
   4840 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4841 		else
   4842 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4843 		break;
   4844 	case WM_T_82545_3:
   4845 	case WM_T_82546_3:
   4846 		/* Use the shadow control register on these chips. */
   4847 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4848 		break;
   4849 	case WM_T_80003:
   4850 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4851 		sc->phy.acquire(sc);
   4852 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4853 		sc->phy.release(sc);
   4854 		break;
   4855 	case WM_T_ICH8:
   4856 	case WM_T_ICH9:
   4857 	case WM_T_ICH10:
   4858 	case WM_T_PCH:
   4859 	case WM_T_PCH2:
   4860 	case WM_T_PCH_LPT:
   4861 	case WM_T_PCH_SPT:
   4862 	case WM_T_PCH_CNP:
   4863 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4864 		if (wm_phy_resetisblocked(sc) == false) {
   4865 			/*
   4866 			 * Gate automatic PHY configuration by hardware on
   4867 			 * non-managed 82579
   4868 			 */
   4869 			if ((sc->sc_type == WM_T_PCH2)
   4870 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4871 				== 0))
   4872 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4873 
   4874 			reg |= CTRL_PHY_RESET;
   4875 			phy_reset = 1;
   4876 		} else
   4877 			printf("XXX reset is blocked!!!\n");
   4878 		sc->phy.acquire(sc);
   4879 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4880 		/* Don't insert a completion barrier when reset */
   4881 		delay(20*1000);
   4882 		mutex_exit(sc->sc_ich_phymtx);
   4883 		break;
   4884 	case WM_T_82580:
   4885 	case WM_T_I350:
   4886 	case WM_T_I354:
   4887 	case WM_T_I210:
   4888 	case WM_T_I211:
   4889 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4890 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4891 			CSR_WRITE_FLUSH(sc);
   4892 		delay(5000);
   4893 		break;
   4894 	case WM_T_82542_2_0:
   4895 	case WM_T_82542_2_1:
   4896 	case WM_T_82543:
   4897 	case WM_T_82540:
   4898 	case WM_T_82545:
   4899 	case WM_T_82546:
   4900 	case WM_T_82571:
   4901 	case WM_T_82572:
   4902 	case WM_T_82573:
   4903 	case WM_T_82574:
   4904 	case WM_T_82575:
   4905 	case WM_T_82576:
   4906 	case WM_T_82583:
   4907 	default:
   4908 		/* Everything else can safely use the documented method. */
   4909 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4910 		break;
   4911 	}
   4912 
   4913 	/* Must release the MDIO ownership after MAC reset */
   4914 	switch (sc->sc_type) {
   4915 	case WM_T_82573:
   4916 	case WM_T_82574:
   4917 	case WM_T_82583:
   4918 		if (error == 0)
   4919 			wm_put_hw_semaphore_82573(sc);
   4920 		break;
   4921 	default:
   4922 		break;
   4923 	}
   4924 
   4925 	/* Set Phy Config Counter to 50msec */
   4926 	if (sc->sc_type == WM_T_PCH2) {
   4927 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4928 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4929 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4930 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4931 	}
   4932 
   4933 	if (phy_reset != 0)
   4934 		wm_get_cfg_done(sc);
   4935 
   4936 	/* reload EEPROM */
   4937 	switch (sc->sc_type) {
   4938 	case WM_T_82542_2_0:
   4939 	case WM_T_82542_2_1:
   4940 	case WM_T_82543:
   4941 	case WM_T_82544:
   4942 		delay(10);
   4943 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4944 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4945 		CSR_WRITE_FLUSH(sc);
   4946 		delay(2000);
   4947 		break;
   4948 	case WM_T_82540:
   4949 	case WM_T_82545:
   4950 	case WM_T_82545_3:
   4951 	case WM_T_82546:
   4952 	case WM_T_82546_3:
   4953 		delay(5*1000);
   4954 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4955 		break;
   4956 	case WM_T_82541:
   4957 	case WM_T_82541_2:
   4958 	case WM_T_82547:
   4959 	case WM_T_82547_2:
   4960 		delay(20000);
   4961 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4962 		break;
   4963 	case WM_T_82571:
   4964 	case WM_T_82572:
   4965 	case WM_T_82573:
   4966 	case WM_T_82574:
   4967 	case WM_T_82583:
   4968 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4969 			delay(10);
   4970 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4971 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4972 			CSR_WRITE_FLUSH(sc);
   4973 		}
   4974 		/* check EECD_EE_AUTORD */
   4975 		wm_get_auto_rd_done(sc);
   4976 		/*
   4977 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4978 		 * is set.
   4979 		 */
   4980 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4981 		    || (sc->sc_type == WM_T_82583))
   4982 			delay(25*1000);
   4983 		break;
   4984 	case WM_T_82575:
   4985 	case WM_T_82576:
   4986 	case WM_T_82580:
   4987 	case WM_T_I350:
   4988 	case WM_T_I354:
   4989 	case WM_T_I210:
   4990 	case WM_T_I211:
   4991 	case WM_T_80003:
   4992 		/* check EECD_EE_AUTORD */
   4993 		wm_get_auto_rd_done(sc);
   4994 		break;
   4995 	case WM_T_ICH8:
   4996 	case WM_T_ICH9:
   4997 	case WM_T_ICH10:
   4998 	case WM_T_PCH:
   4999 	case WM_T_PCH2:
   5000 	case WM_T_PCH_LPT:
   5001 	case WM_T_PCH_SPT:
   5002 	case WM_T_PCH_CNP:
   5003 		break;
   5004 	default:
   5005 		panic("%s: unknown type\n", __func__);
   5006 	}
   5007 
   5008 	/* Check whether EEPROM is present or not */
   5009 	switch (sc->sc_type) {
   5010 	case WM_T_82575:
   5011 	case WM_T_82576:
   5012 	case WM_T_82580:
   5013 	case WM_T_I350:
   5014 	case WM_T_I354:
   5015 	case WM_T_ICH8:
   5016 	case WM_T_ICH9:
   5017 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5018 			/* Not found */
   5019 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5020 			if (sc->sc_type == WM_T_82575)
   5021 				wm_reset_init_script_82575(sc);
   5022 		}
   5023 		break;
   5024 	default:
   5025 		break;
   5026 	}
   5027 
   5028 	if (phy_reset != 0)
   5029 		wm_phy_post_reset(sc);
   5030 
   5031 	if ((sc->sc_type == WM_T_82580)
   5032 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5033 		/* clear global device reset status bit */
   5034 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5035 	}
   5036 
   5037 	/* Clear any pending interrupt events. */
   5038 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5039 	reg = CSR_READ(sc, WMREG_ICR);
   5040 	if (wm_is_using_msix(sc)) {
   5041 		if (sc->sc_type != WM_T_82574) {
   5042 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5043 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5044 		} else
   5045 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5046 	}
   5047 
   5048 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5049 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5050 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5051 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5052 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5053 		reg |= KABGTXD_BGSQLBIAS;
   5054 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5055 	}
   5056 
   5057 	/* reload sc_ctrl */
   5058 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5059 
   5060 	wm_set_eee(sc);
   5061 
   5062 	/*
   5063 	 * For PCH, this write will make sure that any noise will be detected
   5064 	 * as a CRC error and be dropped rather than show up as a bad packet
   5065 	 * to the DMA engine
   5066 	 */
   5067 	if (sc->sc_type == WM_T_PCH)
   5068 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5069 
   5070 	if (sc->sc_type >= WM_T_82544)
   5071 		CSR_WRITE(sc, WMREG_WUC, 0);
   5072 
   5073 	if (sc->sc_type < WM_T_82575)
   5074 		wm_disable_aspm(sc); /* Workaround for some chips */
   5075 
   5076 	wm_reset_mdicnfg_82580(sc);
   5077 
   5078 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5079 		wm_pll_workaround_i210(sc);
   5080 
   5081 	if (sc->sc_type == WM_T_80003) {
   5082 		/* default to TRUE to enable the MDIC W/A */
   5083 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5084 
   5085 		rv = wm_kmrn_readreg(sc,
   5086 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5087 		if (rv == 0) {
   5088 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5089 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5090 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5091 			else
   5092 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5093 		}
   5094 	}
   5095 }
   5096 
   5097 /*
   5098  * wm_add_rxbuf:
   5099  *
 *	Add a receive buffer to the indicated descriptor.
   5101  */
   5102 static int
   5103 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5104 {
   5105 	struct wm_softc *sc = rxq->rxq_sc;
   5106 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5107 	struct mbuf *m;
   5108 	int error;
   5109 
   5110 	KASSERT(mutex_owned(rxq->rxq_lock));
   5111 
   5112 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5113 	if (m == NULL)
   5114 		return ENOBUFS;
   5115 
   5116 	MCLGET(m, M_DONTWAIT);
   5117 	if ((m->m_flags & M_EXT) == 0) {
   5118 		m_freem(m);
   5119 		return ENOBUFS;
   5120 	}
   5121 
   5122 	if (rxs->rxs_mbuf != NULL)
   5123 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5124 
   5125 	rxs->rxs_mbuf = m;
   5126 
   5127 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5128 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5129 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5130 	if (error) {
   5131 		/* XXX XXX XXX */
   5132 		aprint_error_dev(sc->sc_dev,
   5133 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5134 		panic("wm_add_rxbuf");
   5135 	}
   5136 
   5137 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5138 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5139 
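	/*
	 * On NEWQUEUE devices, hand the descriptor to the hardware only
	 * after RCTL.EN has been set; see the corresponding note above
	 * the wm_set_filter() call in wm_init_locked().
	 */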
   5140 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5141 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5142 			wm_init_rxdesc(rxq, idx);
   5143 	} else
   5144 		wm_init_rxdesc(rxq, idx);
   5145 
   5146 	return 0;
   5147 }
   5148 
   5149 /*
   5150  * wm_rxdrain:
   5151  *
   5152  *	Drain the receive queue.
   5153  */
   5154 static void
   5155 wm_rxdrain(struct wm_rxqueue *rxq)
   5156 {
   5157 	struct wm_softc *sc = rxq->rxq_sc;
   5158 	struct wm_rxsoft *rxs;
   5159 	int i;
   5160 
   5161 	KASSERT(mutex_owned(rxq->rxq_lock));
   5162 
   5163 	for (i = 0; i < WM_NRXDESC; i++) {
   5164 		rxs = &rxq->rxq_soft[i];
   5165 		if (rxs->rxs_mbuf != NULL) {
   5166 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5167 			m_freem(rxs->rxs_mbuf);
   5168 			rxs->rxs_mbuf = NULL;
   5169 		}
   5170 	}
   5171 }
   5172 
   5173 /*
   5174  * Setup registers for RSS.
   5175  *
 * XXX VMDq is not yet supported.
   5177  */
   5178 static void
   5179 wm_init_rss(struct wm_softc *sc)
   5180 {
   5181 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5182 	int i;
   5183 
   5184 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5185 
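	/*
	 * Fill the redirection table: entry i maps to queue
	 * (i % sc_nqueues), spreading the hash buckets round-robin
	 * across the queues.
	 */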
   5186 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5187 		int qid, reta_ent;
   5188 
   5189 		qid  = i % sc->sc_nqueues;
   5190 		switch (sc->sc_type) {
   5191 		case WM_T_82574:
   5192 			reta_ent = __SHIFTIN(qid,
   5193 			    RETA_ENT_QINDEX_MASK_82574);
   5194 			break;
   5195 		case WM_T_82575:
   5196 			reta_ent = __SHIFTIN(qid,
   5197 			    RETA_ENT_QINDEX1_MASK_82575);
   5198 			break;
   5199 		default:
   5200 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5201 			break;
   5202 		}
   5203 
   5204 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5205 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5206 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5207 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5208 	}
   5209 
   5210 	rss_getkey((uint8_t *)rss_key);
   5211 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5212 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5213 
   5214 	if (sc->sc_type == WM_T_82574)
   5215 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5216 	else
   5217 		mrqc = MRQC_ENABLE_RSS_MQ;
   5218 
   5219 	/*
	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5221 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5222 	 */
   5223 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5224 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5225 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5226 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5227 
   5228 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5229 }
   5230 
   5231 /*
 * Adjust the number of TX and RX queues which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
   5236  *     - The number of MSI-X vectors (= "nvectors" argument)
   5237  *     - ncpu
   5238  */
   5239 static void
   5240 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5241 {
   5242 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5243 
   5244 	if (nvectors < 2) {
   5245 		sc->sc_nqueues = 1;
   5246 		return;
   5247 	}
   5248 
   5249 	switch (sc->sc_type) {
   5250 	case WM_T_82572:
   5251 		hw_ntxqueues = 2;
   5252 		hw_nrxqueues = 2;
   5253 		break;
   5254 	case WM_T_82574:
   5255 		hw_ntxqueues = 2;
   5256 		hw_nrxqueues = 2;
   5257 		break;
   5258 	case WM_T_82575:
   5259 		hw_ntxqueues = 4;
   5260 		hw_nrxqueues = 4;
   5261 		break;
   5262 	case WM_T_82576:
   5263 		hw_ntxqueues = 16;
   5264 		hw_nrxqueues = 16;
   5265 		break;
   5266 	case WM_T_82580:
   5267 	case WM_T_I350:
   5268 	case WM_T_I354:
   5269 		hw_ntxqueues = 8;
   5270 		hw_nrxqueues = 8;
   5271 		break;
   5272 	case WM_T_I210:
   5273 		hw_ntxqueues = 4;
   5274 		hw_nrxqueues = 4;
   5275 		break;
   5276 	case WM_T_I211:
   5277 		hw_ntxqueues = 2;
   5278 		hw_nrxqueues = 2;
   5279 		break;
   5280 		/*
		 * As the Ethernet controllers below do not support MSI-X,
		 * this driver does not use multiqueue on them.
   5283 		 *     - WM_T_80003
   5284 		 *     - WM_T_ICH8
   5285 		 *     - WM_T_ICH9
   5286 		 *     - WM_T_ICH10
   5287 		 *     - WM_T_PCH
   5288 		 *     - WM_T_PCH2
   5289 		 *     - WM_T_PCH_LPT
   5290 		 */
   5291 	default:
   5292 		hw_ntxqueues = 1;
   5293 		hw_nrxqueues = 1;
   5294 		break;
   5295 	}
   5296 
   5297 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5298 
   5299 	/*
	 * As more queues than MSI-X vectors cannot improve scaling, we limit
	 * the number of queues actually used.
   5302 	 */
   5303 	if (nvectors < hw_nqueues + 1)
   5304 		sc->sc_nqueues = nvectors - 1;
   5305 	else
   5306 		sc->sc_nqueues = hw_nqueues;
   5307 
   5308 	/*
	 * As more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
   5311 	 */
   5312 	if (ncpu < sc->sc_nqueues)
   5313 		sc->sc_nqueues = ncpu;
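	/*
	 * Example: an 82576 (16 hardware queues) attached with 5 MSI-X
	 * vectors on an 8-CPU system ends up with sc_nqueues = 4, as one
	 * vector is reserved for the link interrupt.
	 */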
   5314 }
   5315 
   5316 static inline bool
   5317 wm_is_using_msix(struct wm_softc *sc)
   5318 {
   5319 
   5320 	return (sc->sc_nintrs > 1);
   5321 }
   5322 
   5323 static inline bool
   5324 wm_is_using_multiqueue(struct wm_softc *sc)
   5325 {
   5326 
   5327 	return (sc->sc_nqueues > 1);
   5328 }
   5329 
   5330 static int
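/*
 * Establish the per-queue softint that wm_handle_queue() runs in,
 * binding queue "qidx" to hardware interrupt vector "intr_idx".
 */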
   5331 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5332 {
   5333 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5334 	wmq->wmq_id = qidx;
   5335 	wmq->wmq_intr_idx = intr_idx;
   5336 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5337 #ifdef WM_MPSAFE
   5338 	    | SOFTINT_MPSAFE
   5339 #endif
   5340 	    , wm_handle_queue, wmq);
   5341 	if (wmq->wmq_si != NULL)
   5342 		return 0;
   5343 
   5344 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5345 	    wmq->wmq_id);
   5346 
   5347 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5348 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5349 	return ENOMEM;
   5350 }
   5351 
   5352 /*
   5353  * Both single interrupt MSI and INTx can use this function.
   5354  */
   5355 static int
   5356 wm_setup_legacy(struct wm_softc *sc)
   5357 {
   5358 	pci_chipset_tag_t pc = sc->sc_pc;
   5359 	const char *intrstr = NULL;
   5360 	char intrbuf[PCI_INTRSTR_LEN];
   5361 	int error;
   5362 
   5363 	error = wm_alloc_txrx_queues(sc);
   5364 	if (error) {
   5365 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5366 		    error);
   5367 		return ENOMEM;
   5368 	}
   5369 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5370 	    sizeof(intrbuf));
   5371 #ifdef WM_MPSAFE
   5372 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5373 #endif
   5374 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5375 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5376 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5378 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5379 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5380 		return ENOMEM;
   5381 	}
   5382 
   5383 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5384 	sc->sc_nintrs = 1;
   5385 
   5386 	return wm_softint_establish(sc, 0, 0);
   5387 }
   5388 
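/*
 * Establish one MSI-X vector per Tx/Rx queue pair plus one for link
 * status, distributing the queue vectors' CPU affinity round-robin.
 */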
   5389 static int
   5390 wm_setup_msix(struct wm_softc *sc)
   5391 {
   5392 	void *vih;
   5393 	kcpuset_t *affinity;
   5394 	int qidx, error, intr_idx, txrx_established;
   5395 	pci_chipset_tag_t pc = sc->sc_pc;
   5396 	const char *intrstr = NULL;
   5397 	char intrbuf[PCI_INTRSTR_LEN];
   5398 	char intr_xname[INTRDEVNAMEBUF];
   5399 
   5400 	if (sc->sc_nqueues < ncpu) {
   5401 		/*
		 * To avoid conflicts with other devices' interrupts, the
		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5404 		 */
   5405 		sc->sc_affinity_offset = 1;
   5406 	} else {
   5407 		/*
   5408 		 * In this case, this device use all CPUs. So, we unify
		 * In this case, this device uses all CPUs, so we make the
		 * affinitized cpu_index equal the MSI-X vector number for
		 * readability.
   5411 		sc->sc_affinity_offset = 0;
   5412 	}
   5413 
   5414 	error = wm_alloc_txrx_queues(sc);
   5415 	if (error) {
   5416 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5417 		    error);
   5418 		return ENOMEM;
   5419 	}
   5420 
   5421 	kcpuset_create(&affinity, false);
   5422 	intr_idx = 0;
   5423 
   5424 	/*
   5425 	 * TX and RX
   5426 	 */
   5427 	txrx_established = 0;
   5428 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5429 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5430 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5431 
   5432 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5433 		    sizeof(intrbuf));
   5434 #ifdef WM_MPSAFE
   5435 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5436 		    PCI_INTR_MPSAFE, true);
   5437 #endif
   5438 		memset(intr_xname, 0, sizeof(intr_xname));
   5439 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5440 		    device_xname(sc->sc_dev), qidx);
   5441 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5442 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5443 		if (vih == NULL) {
   5444 			aprint_error_dev(sc->sc_dev,
   5445 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5446 			    intrstr ? " at " : "",
   5447 			    intrstr ? intrstr : "");
   5448 
   5449 			goto fail;
   5450 		}
   5451 		kcpuset_zero(affinity);
   5452 		/* Round-robin affinity */
   5453 		kcpuset_set(affinity, affinity_to);
   5454 		error = interrupt_distribute(vih, affinity, NULL);
   5455 		if (error == 0) {
   5456 			aprint_normal_dev(sc->sc_dev,
   5457 			    "for TX and RX interrupting at %s affinity to %u\n",
   5458 			    intrstr, affinity_to);
   5459 		} else {
   5460 			aprint_normal_dev(sc->sc_dev,
   5461 			    "for TX and RX interrupting at %s\n", intrstr);
   5462 		}
   5463 		sc->sc_ihs[intr_idx] = vih;
   5464 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5465 			goto fail;
   5466 		txrx_established++;
   5467 		intr_idx++;
   5468 	}
   5469 
   5470 	/*
   5471 	 * LINK
   5472 	 */
   5473 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5474 	    sizeof(intrbuf));
   5475 #ifdef WM_MPSAFE
   5476 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5477 #endif
   5478 	memset(intr_xname, 0, sizeof(intr_xname));
   5479 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5480 	    device_xname(sc->sc_dev));
   5481 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5482 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5483 	if (vih == NULL) {
   5484 		aprint_error_dev(sc->sc_dev,
   5485 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5486 		    intrstr ? " at " : "",
   5487 		    intrstr ? intrstr : "");
   5488 
   5489 		goto fail;
   5490 	}
	/* Keep the default affinity for the LINK interrupt */
   5492 	aprint_normal_dev(sc->sc_dev,
   5493 	    "for LINK interrupting at %s\n", intrstr);
   5494 	sc->sc_ihs[intr_idx] = vih;
   5495 	sc->sc_link_intr_idx = intr_idx;
   5496 
   5497 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5498 	kcpuset_destroy(affinity);
   5499 	return 0;
   5500 
   5501  fail:
   5502 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5503 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5505 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5506 	}
   5507 
   5508 	kcpuset_destroy(affinity);
   5509 	return ENOMEM;
   5510 }
   5511 
   5512 static void
   5513 wm_unset_stopping_flags(struct wm_softc *sc)
   5514 {
   5515 	int i;
   5516 
   5517 	KASSERT(WM_CORE_LOCKED(sc));
   5518 
	/* Must unset the stopping flags in ascending order. */
   5522 	for (i = 0; i < sc->sc_nqueues; i++) {
   5523 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5524 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5525 
   5526 		mutex_enter(txq->txq_lock);
   5527 		txq->txq_stopping = false;
   5528 		mutex_exit(txq->txq_lock);
   5529 
   5530 		mutex_enter(rxq->rxq_lock);
   5531 		rxq->rxq_stopping = false;
   5532 		mutex_exit(rxq->rxq_lock);
   5533 	}
   5534 
   5535 	sc->sc_core_stopping = false;
   5536 }
   5537 
   5538 static void
   5539 wm_set_stopping_flags(struct wm_softc *sc)
   5540 {
   5541 	int i;
   5542 
   5543 	KASSERT(WM_CORE_LOCKED(sc));
   5544 
   5545 	sc->sc_core_stopping = true;
   5546 
	/* Must set the stopping flags in ascending order. */
   5550 	for (i = 0; i < sc->sc_nqueues; i++) {
   5551 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5552 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5553 
   5554 		mutex_enter(rxq->rxq_lock);
   5555 		rxq->rxq_stopping = true;
   5556 		mutex_exit(rxq->rxq_lock);
   5557 
   5558 		mutex_enter(txq->txq_lock);
   5559 		txq->txq_stopping = true;
   5560 		mutex_exit(txq->txq_lock);
   5561 	}
   5562 }
   5563 
   5564 /*
 * Write the interrupt interval value to the ITR or EITR register.
   5566  */
   5567 static void
   5568 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5569 {
   5570 
   5571 	if (!wmq->wmq_set_itr)
   5572 		return;
   5573 
   5574 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5575 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5576 
   5577 		/*
		 * The 82575 doesn't have the CNT_INGR field, so overwrite
		 * the counter field by software.
   5580 		 */
   5581 		if (sc->sc_type == WM_T_82575)
   5582 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5583 		else
   5584 			eitr |= EITR_CNT_INGR;
   5585 
   5586 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5587 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5588 		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
   5591 		 */
   5592 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5593 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5594 	} else {
   5595 		KASSERT(wmq->wmq_id == 0);
   5596 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5597 	}
   5598 
   5599 	wmq->wmq_set_itr = false;
   5600 }
   5601 
   5602 /*
   5603  * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's;
 * however, it does not fit wm(4), so AIM is disabled until we find an
 * appropriate ITR calculation.
   5607  */
   5608 /*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register. This function does not write the ITR/EITR
 * register itself.
   5611  */
   5612 static void
   5613 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5614 {
   5615 #ifdef NOTYET
   5616 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5617 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5618 	uint32_t avg_size = 0;
   5619 	uint32_t new_itr;
   5620 
   5621 	if (rxq->rxq_packets)
   5622 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5623 	if (txq->txq_packets)
   5624 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5625 
   5626 	if (avg_size == 0) {
   5627 		new_itr = 450; /* restore default value */
   5628 		goto out;
   5629 	}
   5630 
   5631 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5632 	avg_size += 24;
   5633 
   5634 	/* Don't starve jumbo frames */
   5635 	avg_size = uimin(avg_size, 3000);
   5636 
   5637 	/* Give a little boost to mid-size frames */
   5638 	if ((avg_size > 300) && (avg_size < 1200))
   5639 		new_itr = avg_size / 3;
   5640 	else
   5641 		new_itr = avg_size / 2;
   5642 
   5643 out:
   5644 	/*
	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5646 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5647 	 */
   5648 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5649 		new_itr *= 4;
   5650 
   5651 	if (new_itr != wmq->wmq_itr) {
   5652 		wmq->wmq_itr = new_itr;
   5653 		wmq->wmq_set_itr = true;
   5654 	} else
   5655 		wmq->wmq_set_itr = false;
   5656 
   5657 	rxq->rxq_packets = 0;
   5658 	rxq->rxq_bytes = 0;
   5659 	txq->txq_packets = 0;
   5660 	txq->txq_bytes = 0;
   5661 #endif
   5662 }
   5663 
   5664 /*
   5665  * wm_init:		[ifnet interface function]
   5666  *
   5667  *	Initialize the interface.
   5668  */
   5669 static int
   5670 wm_init(struct ifnet *ifp)
   5671 {
   5672 	struct wm_softc *sc = ifp->if_softc;
   5673 	int ret;
   5674 
   5675 	WM_CORE_LOCK(sc);
   5676 	ret = wm_init_locked(ifp);
   5677 	WM_CORE_UNLOCK(sc);
   5678 
   5679 	return ret;
   5680 }
   5681 
   5682 static int
   5683 wm_init_locked(struct ifnet *ifp)
   5684 {
   5685 	struct wm_softc *sc = ifp->if_softc;
   5686 	struct ethercom *ec = &sc->sc_ethercom;
   5687 	int i, j, trynum, error = 0;
   5688 	uint32_t reg;
   5689 
   5690 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5691 		device_xname(sc->sc_dev), __func__));
   5692 	KASSERT(WM_CORE_LOCKED(sc));
   5693 
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.  (The 2-byte tweak shifts
	 * the 14-byte Ethernet header so that the IP header that follows it
	 * is 4-byte aligned.)
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
   5705 #ifdef __NO_STRICT_ALIGNMENT
   5706 	sc->sc_align_tweak = 0;
   5707 #else
   5708 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5709 		sc->sc_align_tweak = 0;
   5710 	else
   5711 		sc->sc_align_tweak = 2;
   5712 #endif /* __NO_STRICT_ALIGNMENT */
   5713 
   5714 	/* Cancel any pending I/O. */
   5715 	wm_stop_locked(ifp, 0);
   5716 
   5717 	/* update statistics before reset */
   5718 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5719 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5720 
   5721 	/* PCH_SPT hardware workaround */
   5722 	if (sc->sc_type == WM_T_PCH_SPT)
   5723 		wm_flush_desc_rings(sc);
   5724 
   5725 	/* Reset the chip to a known state. */
   5726 	wm_reset(sc);
   5727 
   5728 	/*
	 * AMT-based hardware can now take control from firmware.
	 * Do this after reset.
   5731 	 */
   5732 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5733 		wm_get_hw_control(sc);
   5734 
   5735 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5736 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5737 		wm_legacy_irq_quirk_spt(sc);
   5738 
   5739 	/* Init hardware bits */
   5740 	wm_initialize_hardware_bits(sc);
   5741 
   5742 	/* Reset the PHY. */
   5743 	if (sc->sc_flags & WM_F_HAS_MII)
   5744 		wm_gmii_reset(sc);
   5745 
   5746 	if (sc->sc_type >= WM_T_ICH8) {
   5747 		reg = CSR_READ(sc, WMREG_GCR);
   5748 		/*
   5749 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5750 		 * default after reset.
   5751 		 */
   5752 		if (sc->sc_type == WM_T_ICH8)
   5753 			reg |= GCR_NO_SNOOP_ALL;
   5754 		else
   5755 			reg &= ~GCR_NO_SNOOP_ALL;
   5756 		CSR_WRITE(sc, WMREG_GCR, reg);
   5757 	}
   5758 	if ((sc->sc_type >= WM_T_ICH8)
   5759 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5760 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5761 
   5762 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5763 		reg |= CTRL_EXT_RO_DIS;
   5764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5765 	}
   5766 
   5767 	/* Calculate (E)ITR value */
   5768 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * For NEWQUEUE's EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as other old controllers' ITR because the
		 * interrupt/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as the ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
		 */
		sc->sc_itr_init = 450;		/* ~2222 ints/sec */
   5781 	} else if (sc->sc_type >= WM_T_82543) {
   5782 		/*
   5783 		 * Set up the interrupt throttling register (units of 256ns)
   5784 		 * Note that a footnote in Intel's documentation says this
   5785 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5786 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5787 		 * that that is also true for the 1024ns units of the other
   5788 		 * interrupt-related timer registers -- so, really, we ought
   5789 		 * to divide this value by 4 when the link speed is low.
   5790 		 *
   5791 		 * XXX implement this division at link speed change!
   5792 		 */
   5793 
   5794 		/*
   5795 		 * For N interrupts/sec, set this value to:
   5796 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5797 		 * absolute and packet timer values to this value
   5798 		 * divided by 4 to get "simple timer" behavior.
   5799 		 */
   5800 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5801 	}
   5802 
   5803 	error = wm_init_txrx_queues(sc);
   5804 	if (error)
   5805 		goto out;
   5806 
   5807 	/*
   5808 	 * Clear out the VLAN table -- we don't use it (yet).
   5809 	 */
   5810 	CSR_WRITE(sc, WMREG_VET, 0);
   5811 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5812 		trynum = 10; /* Due to hw errata */
   5813 	else
   5814 		trynum = 1;
   5815 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5816 		for (j = 0; j < trynum; j++)
   5817 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5818 
   5819 	/*
   5820 	 * Set up flow-control parameters.
   5821 	 *
   5822 	 * XXX Values could probably stand some tuning.
   5823 	 */
   5824 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5825 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5826 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5827 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5828 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5829 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5830 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5831 	}
   5832 
   5833 	sc->sc_fcrtl = FCRTL_DFLT;
   5834 	if (sc->sc_type < WM_T_82543) {
   5835 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5836 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5837 	} else {
   5838 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5839 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5840 	}
   5841 
   5842 	if (sc->sc_type == WM_T_80003)
   5843 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5844 	else
   5845 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5846 
   5847 	/* Writes the control register. */
   5848 	wm_set_vlan(sc);
   5849 
   5850 	if (sc->sc_flags & WM_F_HAS_MII) {
   5851 		uint16_t kmreg;
   5852 
   5853 		switch (sc->sc_type) {
   5854 		case WM_T_80003:
   5855 		case WM_T_ICH8:
   5856 		case WM_T_ICH9:
   5857 		case WM_T_ICH10:
   5858 		case WM_T_PCH:
   5859 		case WM_T_PCH2:
   5860 		case WM_T_PCH_LPT:
   5861 		case WM_T_PCH_SPT:
   5862 		case WM_T_PCH_CNP:
   5863 			/*
			 * Set the MAC to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the PHY; this fixes erroneous timeouts at
   5867 			 * 10Mbps.
   5868 			 */
   5869 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5870 			    0xFFFF);
   5871 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5872 			    &kmreg);
   5873 			kmreg |= 0x3F;
   5874 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5875 			    kmreg);
   5876 			break;
   5877 		default:
   5878 			break;
   5879 		}
   5880 
   5881 		if (sc->sc_type == WM_T_80003) {
   5882 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5883 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5884 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5885 
   5886 			/* Bypass RX and TX FIFO's */
   5887 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5888 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5889 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5890 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5891 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5892 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5893 		}
   5894 	}
   5895 #if 0
   5896 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5897 #endif
   5898 
   5899 	/* Set up checksum offload parameters. */
   5900 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5901 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5902 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5903 		reg |= RXCSUM_IPOFL;
   5904 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5905 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5906 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5907 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5908 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5909 
   5910 	/* Set registers about MSI-X */
   5911 	if (wm_is_using_msix(sc)) {
   5912 		uint32_t ivar;
   5913 		struct wm_queue *wmq;
   5914 		int qid, qintr_idx;
   5915 
   5916 		if (sc->sc_type == WM_T_82575) {
   5917 			/* Interrupt control */
   5918 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5919 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5920 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5921 
   5922 			/* TX and RX */
   5923 			for (i = 0; i < sc->sc_nqueues; i++) {
   5924 				wmq = &sc->sc_queue[i];
   5925 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5926 				    EITR_TX_QUEUE(wmq->wmq_id)
   5927 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5928 			}
   5929 			/* Link status */
   5930 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5931 			    EITR_OTHER);
   5932 		} else if (sc->sc_type == WM_T_82574) {
   5933 			/* Interrupt control */
   5934 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5935 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5936 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5937 
   5938 			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
   5943 			 */
   5944 			reg = CSR_READ(sc, WMREG_RFCTL);
   5945 			reg |= WMREG_RFCTL_ACKDIS;
   5946 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5947 
   5948 			ivar = 0;
   5949 			/* TX and RX */
   5950 			for (i = 0; i < sc->sc_nqueues; i++) {
   5951 				wmq = &sc->sc_queue[i];
   5952 				qid = wmq->wmq_id;
   5953 				qintr_idx = wmq->wmq_intr_idx;
   5954 
   5955 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5956 				    IVAR_TX_MASK_Q_82574(qid));
   5957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5958 				    IVAR_RX_MASK_Q_82574(qid));
   5959 			}
   5960 			/* Link status */
   5961 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5962 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5963 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5964 		} else {
   5965 			/* Interrupt control */
   5966 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5967 			    | GPIE_EIAME | GPIE_PBA);
   5968 
   5969 			switch (sc->sc_type) {
   5970 			case WM_T_82580:
   5971 			case WM_T_I350:
   5972 			case WM_T_I354:
   5973 			case WM_T_I210:
   5974 			case WM_T_I211:
   5975 				/* TX and RX */
   5976 				for (i = 0; i < sc->sc_nqueues; i++) {
   5977 					wmq = &sc->sc_queue[i];
   5978 					qid = wmq->wmq_id;
   5979 					qintr_idx = wmq->wmq_intr_idx;
   5980 
   5981 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5982 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5983 					ivar |= __SHIFTIN((qintr_idx
   5984 						| IVAR_VALID),
   5985 					    IVAR_TX_MASK_Q(qid));
   5986 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5987 					ivar |= __SHIFTIN((qintr_idx
   5988 						| IVAR_VALID),
   5989 					    IVAR_RX_MASK_Q(qid));
   5990 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5991 				}
   5992 				break;
   5993 			case WM_T_82576:
   5994 				/* TX and RX */
   5995 				for (i = 0; i < sc->sc_nqueues; i++) {
   5996 					wmq = &sc->sc_queue[i];
   5997 					qid = wmq->wmq_id;
   5998 					qintr_idx = wmq->wmq_intr_idx;
   5999 
   6000 					ivar = CSR_READ(sc,
   6001 					    WMREG_IVAR_Q_82576(qid));
   6002 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6003 					ivar |= __SHIFTIN((qintr_idx
   6004 						| IVAR_VALID),
   6005 					    IVAR_TX_MASK_Q_82576(qid));
   6006 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6007 					ivar |= __SHIFTIN((qintr_idx
   6008 						| IVAR_VALID),
   6009 					    IVAR_RX_MASK_Q_82576(qid));
   6010 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6011 					    ivar);
   6012 				}
   6013 				break;
   6014 			default:
   6015 				break;
   6016 			}
   6017 
   6018 			/* Link status */
   6019 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6020 			    IVAR_MISC_OTHER);
   6021 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6022 		}
   6023 
   6024 		if (wm_is_using_multiqueue(sc)) {
   6025 			wm_init_rss(sc);
   6026 
			/*
			 * NOTE: Receive full-packet checksum offload is
			 * mutually exclusive with multiqueue. However, this
			 * is not the same as the TCP/IP checksum offloads,
			 * which still work.
			 */
   6033 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6034 			reg |= RXCSUM_PCSD;
   6035 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6036 		}
   6037 	}
   6038 
   6039 	/* Set up the interrupt registers. */
   6040 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6041 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6042 	    ICR_RXO | ICR_RXT0;
   6043 	if (wm_is_using_msix(sc)) {
   6044 		uint32_t mask;
   6045 		struct wm_queue *wmq;
   6046 
   6047 		switch (sc->sc_type) {
   6048 		case WM_T_82574:
   6049 			mask = 0;
   6050 			for (i = 0; i < sc->sc_nqueues; i++) {
   6051 				wmq = &sc->sc_queue[i];
   6052 				mask |= ICR_TXQ(wmq->wmq_id);
   6053 				mask |= ICR_RXQ(wmq->wmq_id);
   6054 			}
   6055 			mask |= ICR_OTHER;
   6056 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6057 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6058 			break;
   6059 		default:
   6060 			if (sc->sc_type == WM_T_82575) {
   6061 				mask = 0;
   6062 				for (i = 0; i < sc->sc_nqueues; i++) {
   6063 					wmq = &sc->sc_queue[i];
   6064 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6065 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6066 				}
   6067 				mask |= EITR_OTHER;
   6068 			} else {
   6069 				mask = 0;
   6070 				for (i = 0; i < sc->sc_nqueues; i++) {
   6071 					wmq = &sc->sc_queue[i];
   6072 					mask |= 1 << wmq->wmq_intr_idx;
   6073 				}
   6074 				mask |= 1 << sc->sc_link_intr_idx;
   6075 			}
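			/*
			 * Program interrupt auto-clear (EIAC), auto-mask
			 * (EIAM) and the initial mask (EIMS) with the same
			 * vector bitmap; link state changes (LSC) are still
			 * delivered through the legacy IMS register.
			 */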
   6076 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6077 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6078 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6079 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6080 			break;
   6081 		}
   6082 	} else
   6083 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6084 
   6085 	/* Set up the inter-packet gap. */
   6086 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6087 
   6088 	if (sc->sc_type >= WM_T_82543) {
   6089 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6090 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6091 			wm_itrs_writereg(sc, wmq);
   6092 		}
   6093 		/*
		 * Link interrupts occur much less often than TX and RX
		 * interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's if_igb
		 * does.
   6098 		 */
   6099 	}
   6100 
   6101 	/* Set the VLAN ethernetype. */
   6102 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6103 
   6104 	/*
   6105 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
   6108 	 */
   6109 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6110 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6111 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6112 	if (sc->sc_type >= WM_T_82571)
   6113 		sc->sc_tctl |= TCTL_MULR;
   6114 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6115 
   6116 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
   6118 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6119 	}
   6120 
   6121 	if (sc->sc_type == WM_T_80003) {
   6122 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6123 		reg &= ~TCTL_EXT_GCEX_MASK;
   6124 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6125 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6126 	}
   6127 
   6128 	/* Set the media. */
   6129 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6130 		goto out;
   6131 
   6132 	/* Configure for OS presence */
   6133 	wm_init_manageability(sc);
   6134 
   6135 	/*
   6136 	 * Set up the receive control register; we actually program the
   6137 	 * register when we set the receive filter. Use multicast address
   6138 	 * offset type 0.
   6139 	 *
   6140 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6141 	 * don't enable that feature.
   6142 	 */
   6143 	sc->sc_mchash_type = 0;
   6144 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6145 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6146 
	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6150 	if (sc->sc_type == WM_T_82574)
   6151 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6152 
   6153 	/*
	 * The I350 has a bug where it always strips the CRC whether asked
	 * to or not, so ask for a stripped CRC here and cope in rxeof.
	 */
   6157 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6158 	    || (sc->sc_type == WM_T_I210))
   6159 		sc->sc_rctl |= RCTL_SECRC;
   6160 
   6161 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6162 	    && (ifp->if_mtu > ETHERMTU)) {
   6163 		sc->sc_rctl |= RCTL_LPE;
   6164 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6165 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6166 	}
   6167 
   6168 	if (MCLBYTES == 2048)
   6169 		sc->sc_rctl |= RCTL_2k;
   6170 	else {
   6171 		if (sc->sc_type >= WM_T_82543) {
   6172 			switch (MCLBYTES) {
   6173 			case 4096:
   6174 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6175 				break;
   6176 			case 8192:
   6177 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6178 				break;
   6179 			case 16384:
   6180 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6181 				break;
   6182 			default:
   6183 				panic("wm_init: MCLBYTES %d unsupported",
   6184 				    MCLBYTES);
   6185 				break;
   6186 			}
   6187 		} else
   6188 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6189 	}
   6190 
   6191 	/* Enable ECC */
   6192 	switch (sc->sc_type) {
   6193 	case WM_T_82571:
   6194 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6195 		reg |= PBA_ECC_CORR_EN;
   6196 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6197 		break;
   6198 	case WM_T_PCH_LPT:
   6199 	case WM_T_PCH_SPT:
   6200 	case WM_T_PCH_CNP:
   6201 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6202 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6203 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6204 
   6205 		sc->sc_ctrl |= CTRL_MEHE;
   6206 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6207 		break;
   6208 	default:
   6209 		break;
   6210 	}
   6211 
   6212 	/*
   6213 	 * Set the receive filter.
   6214 	 *
   6215 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6216 	 * the setting of RCTL.EN in wm_set_filter()
   6217 	 */
   6218 	wm_set_filter(sc);
   6219 
    6220 	/* On 82575 and later, set RDT only if RX is enabled */
   6221 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6222 		int qidx;
   6223 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6224 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6225 			for (i = 0; i < WM_NRXDESC; i++) {
   6226 				mutex_enter(rxq->rxq_lock);
   6227 				wm_init_rxdesc(rxq, i);
   6228 				mutex_exit(rxq->rxq_lock);
   6229 
   6230 			}
   6231 		}
   6232 	}
   6233 
   6234 	wm_unset_stopping_flags(sc);
   6235 
   6236 	/* Start the one second link check clock. */
   6237 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6238 
   6239 	/* ...all done! */
   6240 	ifp->if_flags |= IFF_RUNNING;
   6241 	ifp->if_flags &= ~IFF_OACTIVE;
   6242 
   6243  out:
   6244 	/* Save last flags for the callback */
   6245 	sc->sc_if_flags = ifp->if_flags;
   6246 	sc->sc_ec_capenable = ec->ec_capenable;
   6247 	if (error)
   6248 		log(LOG_ERR, "%s: interface not running\n",
   6249 		    device_xname(sc->sc_dev));
   6250 	return error;
   6251 }
   6252 
   6253 /*
   6254  * wm_stop:		[ifnet interface function]
   6255  *
   6256  *	Stop transmission on the interface.
   6257  */
   6258 static void
   6259 wm_stop(struct ifnet *ifp, int disable)
   6260 {
   6261 	struct wm_softc *sc = ifp->if_softc;
   6262 
   6263 	WM_CORE_LOCK(sc);
   6264 	wm_stop_locked(ifp, disable);
   6265 	WM_CORE_UNLOCK(sc);
   6266 }
   6267 
   6268 static void
   6269 wm_stop_locked(struct ifnet *ifp, int disable)
   6270 {
   6271 	struct wm_softc *sc = ifp->if_softc;
   6272 	struct wm_txsoft *txs;
   6273 	int i, qidx;
   6274 
   6275 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6276 		device_xname(sc->sc_dev), __func__));
   6277 	KASSERT(WM_CORE_LOCKED(sc));
   6278 
   6279 	wm_set_stopping_flags(sc);
   6280 
   6281 	/* Stop the one second clock. */
   6282 	callout_stop(&sc->sc_tick_ch);
   6283 
   6284 	/* Stop the 82547 Tx FIFO stall check timer. */
   6285 	if (sc->sc_type == WM_T_82547)
   6286 		callout_stop(&sc->sc_txfifo_ch);
   6287 
   6288 	if (sc->sc_flags & WM_F_HAS_MII) {
   6289 		/* Down the MII. */
   6290 		mii_down(&sc->sc_mii);
   6291 	} else {
   6292 #if 0
   6293 		/* Should we clear PHY's status properly? */
   6294 		wm_reset(sc);
   6295 #endif
   6296 	}
   6297 
   6298 	/* Stop the transmit and receive processes. */
   6299 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6300 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6301 	sc->sc_rctl &= ~RCTL_EN;
   6302 
   6303 	/*
   6304 	 * Clear the interrupt mask to ensure the device cannot assert its
   6305 	 * interrupt line.
   6306 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6307 	 * service any currently pending or shared interrupt.
   6308 	 */
   6309 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6310 	sc->sc_icr = 0;
   6311 	if (wm_is_using_msix(sc)) {
   6312 		if (sc->sc_type != WM_T_82574) {
   6313 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6314 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6315 		} else
   6316 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6317 	}
   6318 
   6319 	/* Release any queued transmit buffers. */
   6320 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6321 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6322 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6323 		mutex_enter(txq->txq_lock);
   6324 		txq->txq_sending = false; /* ensure watchdog disabled */
   6325 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6326 			txs = &txq->txq_soft[i];
   6327 			if (txs->txs_mbuf != NULL) {
    6328 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6329 				m_freem(txs->txs_mbuf);
   6330 				txs->txs_mbuf = NULL;
   6331 			}
   6332 		}
   6333 		mutex_exit(txq->txq_lock);
   6334 	}
   6335 
   6336 	/* Mark the interface as down and cancel the watchdog timer. */
   6337 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6338 
   6339 	if (disable) {
   6340 		for (i = 0; i < sc->sc_nqueues; i++) {
   6341 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6342 			mutex_enter(rxq->rxq_lock);
   6343 			wm_rxdrain(rxq);
   6344 			mutex_exit(rxq->rxq_lock);
   6345 		}
   6346 	}
   6347 
   6348 #if 0 /* notyet */
   6349 	if (sc->sc_type >= WM_T_82544)
   6350 		CSR_WRITE(sc, WMREG_WUC, 0);
   6351 #endif
   6352 }
   6353 
   6354 static void
   6355 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6356 {
   6357 	struct mbuf *m;
   6358 	int i;
   6359 
   6360 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6361 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6362 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6363 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6364 		    m->m_data, m->m_len, m->m_flags);
   6365 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6366 	    i, i == 1 ? "" : "s");
   6367 }
   6368 
   6369 /*
   6370  * wm_82547_txfifo_stall:
   6371  *
   6372  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6373  *	reset the FIFO pointers, and restart packet transmission.
   6374  */
   6375 static void
   6376 wm_82547_txfifo_stall(void *arg)
   6377 {
   6378 	struct wm_softc *sc = arg;
   6379 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6380 
   6381 	mutex_enter(txq->txq_lock);
   6382 
   6383 	if (txq->txq_stopping)
   6384 		goto out;
   6385 
   6386 	if (txq->txq_fifo_stall) {
   6387 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6388 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6389 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6390 			/*
   6391 			 * Packets have drained.  Stop transmitter, reset
   6392 			 * FIFO pointers, restart transmitter, and kick
   6393 			 * the packet queue.
   6394 			 */
   6395 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6396 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6397 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6398 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6399 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6400 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6401 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6402 			CSR_WRITE_FLUSH(sc);
   6403 
   6404 			txq->txq_fifo_head = 0;
   6405 			txq->txq_fifo_stall = 0;
   6406 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6407 		} else {
   6408 			/*
   6409 			 * Still waiting for packets to drain; try again in
   6410 			 * another tick.
   6411 			 */
   6412 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6413 		}
   6414 	}
   6415 
   6416 out:
   6417 	mutex_exit(txq->txq_lock);
   6418 }
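
/*
 * A note on the drain check above: TDT == TDH means the descriptor ring
 * is empty, and TDFT == TDFH / TDFTS == TDFHS mean the internal Tx data
 * FIFO head and tail pointers (and their saved copies) have met, i.e. the
 * FIFO itself has drained, so it is safe to rewind all four pointers to
 * txq_fifo_addr and re-enable the transmitter.
 */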
   6419 
   6420 /*
   6421  * wm_82547_txfifo_bugchk:
   6422  *
   6423  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6424  *	prevent enqueueing a packet that would wrap around the end
    6425 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6426  *
   6427  *	We do this by checking the amount of space before the end
   6428  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6429  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6430  *	the internal FIFO pointers to the beginning, and restart
   6431  *	transmission on the interface.
   6432  */
   6433 #define	WM_FIFO_HDR		0x10
   6434 #define	WM_82547_PAD_LEN	0x3e0
   6435 static int
   6436 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6437 {
   6438 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6439 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6440 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6441 
   6442 	/* Just return if already stalled. */
   6443 	if (txq->txq_fifo_stall)
   6444 		return 1;
   6445 
   6446 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
    6447 		/* The stall only occurs in half-duplex mode; FDX is safe. */
   6448 		goto send_packet;
   6449 	}
   6450 
   6451 	if (len >= WM_82547_PAD_LEN + space) {
   6452 		txq->txq_fifo_stall = 1;
   6453 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6454 		return 1;
   6455 	}
   6456 
   6457  send_packet:
   6458 	txq->txq_fifo_head += len;
   6459 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6460 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6461 
   6462 	return 0;
   6463 }
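
#if 0
/*
 * Illustrative sketch (not part of the driver): the FIFO accounting
 * above, modelled standalone with example values. An 8KB Tx FIFO that
 * is nearly full cannot safely accept a full-size frame, so the bug
 * check would stall it.
 */
#include <stdio.h>

#define	FIFO_HDR	0x10	/* mirrors WM_FIFO_HDR */
#define	PAD_LEN		0x3e0	/* mirrors WM_82547_PAD_LEN */
#define	ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
	int fifo_size = 0x2000;	/* example: 8KB Tx FIFO */
	int fifo_head = 0x1e00;	/* example: FIFO nearly full */
	int pktlen = 1514;	/* example: maximum-size Ethernet frame */
	int space = fifo_size - fifo_head;		/* 0x200 */
	int len = ROUNDUP(pktlen + FIFO_HDR, FIFO_HDR);	/* 0x600 */

	/* 0x600 >= 0x3e0 + 0x200 (= 0x5e0), so this packet would stall. */
	printf("stall = %d\n", len >= PAD_LEN + space);
	return 0;
}
#endif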
   6464 
   6465 static int
   6466 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6467 {
   6468 	int error;
   6469 
   6470 	/*
   6471 	 * Allocate the control data structures, and create and load the
   6472 	 * DMA map for it.
   6473 	 *
   6474 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6475 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6476 	 * both sets within the same 4G segment.
   6477 	 */
   6478 	if (sc->sc_type < WM_T_82544)
   6479 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6480 	else
   6481 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6482 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6483 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6484 	else
   6485 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6486 
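	/*
	 * Note: the 0x100000000ULL "boundary" argument to bus_dmamem_alloc(9)
	 * below asks for memory that does not cross a 4GB boundary, which is
	 * what keeps the descriptor ring within a single 4G segment as the
	 * comment above requires (likewise for the Rx ring allocation).
	 */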
   6487 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6488 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6489 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6490 		aprint_error_dev(sc->sc_dev,
   6491 		    "unable to allocate TX control data, error = %d\n",
   6492 		    error);
   6493 		goto fail_0;
   6494 	}
   6495 
   6496 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6497 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6498 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6499 		aprint_error_dev(sc->sc_dev,
   6500 		    "unable to map TX control data, error = %d\n", error);
   6501 		goto fail_1;
   6502 	}
   6503 
   6504 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6505 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6506 		aprint_error_dev(sc->sc_dev,
   6507 		    "unable to create TX control data DMA map, error = %d\n",
   6508 		    error);
   6509 		goto fail_2;
   6510 	}
   6511 
   6512 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6513 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6514 		aprint_error_dev(sc->sc_dev,
   6515 		    "unable to load TX control data DMA map, error = %d\n",
   6516 		    error);
   6517 		goto fail_3;
   6518 	}
   6519 
   6520 	return 0;
   6521 
   6522  fail_3:
   6523 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6524  fail_2:
   6525 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6526 	    WM_TXDESCS_SIZE(txq));
   6527  fail_1:
   6528 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6529  fail_0:
   6530 	return error;
   6531 }
   6532 
   6533 static void
   6534 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6535 {
   6536 
   6537 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6538 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6539 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6540 	    WM_TXDESCS_SIZE(txq));
   6541 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6542 }
   6543 
   6544 static int
   6545 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6546 {
   6547 	int error;
   6548 	size_t rxq_descs_size;
   6549 
   6550 	/*
   6551 	 * Allocate the control data structures, and create and load the
   6552 	 * DMA map for it.
   6553 	 *
   6554 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6555 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6556 	 * both sets within the same 4G segment.
   6557 	 */
   6558 	rxq->rxq_ndesc = WM_NRXDESC;
   6559 	if (sc->sc_type == WM_T_82574)
   6560 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6561 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6562 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6563 	else
   6564 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6565 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6566 
   6567 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6568 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6569 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6570 		aprint_error_dev(sc->sc_dev,
   6571 		    "unable to allocate RX control data, error = %d\n",
   6572 		    error);
   6573 		goto fail_0;
   6574 	}
   6575 
   6576 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6577 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6578 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6579 		aprint_error_dev(sc->sc_dev,
   6580 		    "unable to map RX control data, error = %d\n", error);
   6581 		goto fail_1;
   6582 	}
   6583 
   6584 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6585 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6586 		aprint_error_dev(sc->sc_dev,
   6587 		    "unable to create RX control data DMA map, error = %d\n",
   6588 		    error);
   6589 		goto fail_2;
   6590 	}
   6591 
   6592 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6593 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6594 		aprint_error_dev(sc->sc_dev,
   6595 		    "unable to load RX control data DMA map, error = %d\n",
   6596 		    error);
   6597 		goto fail_3;
   6598 	}
   6599 
   6600 	return 0;
   6601 
   6602  fail_3:
   6603 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6604  fail_2:
   6605 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6606 	    rxq_descs_size);
   6607  fail_1:
   6608 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6609  fail_0:
   6610 	return error;
   6611 }
   6612 
   6613 static void
   6614 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6615 {
   6616 
   6617 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6618 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6619 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6620 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6621 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6622 }
   6623 
   6624 
   6625 static int
   6626 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6627 {
   6628 	int i, error;
   6629 
   6630 	/* Create the transmit buffer DMA maps. */
   6631 	WM_TXQUEUELEN(txq) =
   6632 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6633 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6634 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6635 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6636 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6637 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6638 			aprint_error_dev(sc->sc_dev,
   6639 			    "unable to create Tx DMA map %d, error = %d\n",
   6640 			    i, error);
   6641 			goto fail;
   6642 		}
   6643 	}
   6644 
   6645 	return 0;
   6646 
   6647  fail:
   6648 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6649 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6650 			bus_dmamap_destroy(sc->sc_dmat,
   6651 			    txq->txq_soft[i].txs_dmamap);
   6652 	}
   6653 	return error;
   6654 }
   6655 
   6656 static void
   6657 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6658 {
   6659 	int i;
   6660 
   6661 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6662 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6663 			bus_dmamap_destroy(sc->sc_dmat,
   6664 			    txq->txq_soft[i].txs_dmamap);
   6665 	}
   6666 }
   6667 
   6668 static int
   6669 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6670 {
   6671 	int i, error;
   6672 
   6673 	/* Create the receive buffer DMA maps. */
   6674 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6675 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6676 			    MCLBYTES, 0, 0,
   6677 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6678 			aprint_error_dev(sc->sc_dev,
   6679 			    "unable to create Rx DMA map %d error = %d\n",
   6680 			    i, error);
   6681 			goto fail;
   6682 		}
   6683 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6684 	}
   6685 
   6686 	return 0;
   6687 
   6688  fail:
   6689 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6690 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6691 			bus_dmamap_destroy(sc->sc_dmat,
   6692 			    rxq->rxq_soft[i].rxs_dmamap);
   6693 	}
   6694 	return error;
   6695 }
   6696 
   6697 static void
   6698 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6699 {
   6700 	int i;
   6701 
   6702 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6703 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6704 			bus_dmamap_destroy(sc->sc_dmat,
   6705 			    rxq->rxq_soft[i].rxs_dmamap);
   6706 	}
   6707 }
   6708 
   6709 /*
    6710  * wm_alloc_txrx_queues:
   6711  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6712  */
   6713 static int
   6714 wm_alloc_txrx_queues(struct wm_softc *sc)
   6715 {
   6716 	int i, error, tx_done, rx_done;
   6717 
   6718 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6719 	    KM_SLEEP);
   6720 	if (sc->sc_queue == NULL) {
    6721 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6722 		error = ENOMEM;
   6723 		goto fail_0;
   6724 	}
   6725 
   6726 	/*
   6727 	 * For transmission
   6728 	 */
   6729 	error = 0;
   6730 	tx_done = 0;
   6731 	for (i = 0; i < sc->sc_nqueues; i++) {
   6732 #ifdef WM_EVENT_COUNTERS
   6733 		int j;
   6734 		const char *xname;
   6735 #endif
   6736 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6737 		txq->txq_sc = sc;
   6738 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6739 
   6740 		error = wm_alloc_tx_descs(sc, txq);
   6741 		if (error)
   6742 			break;
   6743 		error = wm_alloc_tx_buffer(sc, txq);
   6744 		if (error) {
   6745 			wm_free_tx_descs(sc, txq);
   6746 			break;
   6747 		}
   6748 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6749 		if (txq->txq_interq == NULL) {
   6750 			wm_free_tx_descs(sc, txq);
   6751 			wm_free_tx_buffer(sc, txq);
   6752 			error = ENOMEM;
   6753 			break;
   6754 		}
   6755 
   6756 #ifdef WM_EVENT_COUNTERS
   6757 		xname = device_xname(sc->sc_dev);
   6758 
   6759 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6760 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6762 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6763 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6770 
   6771 		for (j = 0; j < WM_NTXSEGS; j++) {
   6772 			snprintf(txq->txq_txseg_evcnt_names[j],
   6773 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6774 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6775 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6776 		}
   6777 
   6778 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6779 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6783 #endif /* WM_EVENT_COUNTERS */
   6784 
   6785 		tx_done++;
   6786 	}
   6787 	if (error)
   6788 		goto fail_1;
   6789 
   6790 	/*
    6791 	 * For receive
   6792 	 */
   6793 	error = 0;
   6794 	rx_done = 0;
   6795 	for (i = 0; i < sc->sc_nqueues; i++) {
   6796 #ifdef WM_EVENT_COUNTERS
   6797 		const char *xname;
   6798 #endif
   6799 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6800 		rxq->rxq_sc = sc;
   6801 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6802 
   6803 		error = wm_alloc_rx_descs(sc, rxq);
   6804 		if (error)
   6805 			break;
   6806 
   6807 		error = wm_alloc_rx_buffer(sc, rxq);
   6808 		if (error) {
   6809 			wm_free_rx_descs(sc, rxq);
   6810 			break;
   6811 		}
   6812 
   6813 #ifdef WM_EVENT_COUNTERS
   6814 		xname = device_xname(sc->sc_dev);
   6815 
   6816 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6817 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6818 
   6819 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6820 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6821 #endif /* WM_EVENT_COUNTERS */
   6822 
   6823 		rx_done++;
   6824 	}
   6825 	if (error)
   6826 		goto fail_2;
   6827 
   6828 	return 0;
   6829 
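	/*
	 * The failure paths below unwind only the queues that were fully
	 * initialized: tx_done and rx_done count completed loop iterations,
	 * so a queue whose own setup failed has already had its descriptors
	 * and buffers freed in the loop above.
	 */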
   6830  fail_2:
   6831 	for (i = 0; i < rx_done; i++) {
   6832 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6833 		wm_free_rx_buffer(sc, rxq);
   6834 		wm_free_rx_descs(sc, rxq);
   6835 		if (rxq->rxq_lock)
   6836 			mutex_obj_free(rxq->rxq_lock);
   6837 	}
   6838  fail_1:
   6839 	for (i = 0; i < tx_done; i++) {
   6840 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6841 		pcq_destroy(txq->txq_interq);
   6842 		wm_free_tx_buffer(sc, txq);
   6843 		wm_free_tx_descs(sc, txq);
   6844 		if (txq->txq_lock)
   6845 			mutex_obj_free(txq->txq_lock);
   6846 	}
   6847 
   6848 	kmem_free(sc->sc_queue,
   6849 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6850  fail_0:
   6851 	return error;
   6852 }
   6853 
   6854 /*
    6855  * wm_free_txrx_queues:
   6856  *	Free {tx,rx}descs and {tx,rx} buffers
   6857  */
   6858 static void
   6859 wm_free_txrx_queues(struct wm_softc *sc)
   6860 {
   6861 	int i;
   6862 
   6863 	for (i = 0; i < sc->sc_nqueues; i++) {
   6864 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6865 
   6866 #ifdef WM_EVENT_COUNTERS
   6867 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6868 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6869 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6870 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6871 #endif /* WM_EVENT_COUNTERS */
   6872 
   6873 		wm_free_rx_buffer(sc, rxq);
   6874 		wm_free_rx_descs(sc, rxq);
   6875 		if (rxq->rxq_lock)
   6876 			mutex_obj_free(rxq->rxq_lock);
   6877 	}
   6878 
   6879 	for (i = 0; i < sc->sc_nqueues; i++) {
   6880 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6881 		struct mbuf *m;
   6882 #ifdef WM_EVENT_COUNTERS
   6883 		int j;
   6884 
   6885 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6886 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6887 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6895 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6896 
   6897 		for (j = 0; j < WM_NTXSEGS; j++)
   6898 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6899 
   6900 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6905 #endif /* WM_EVENT_COUNTERS */
   6906 
   6907 		/* drain txq_interq */
   6908 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6909 			m_freem(m);
   6910 		pcq_destroy(txq->txq_interq);
   6911 
   6912 		wm_free_tx_buffer(sc, txq);
   6913 		wm_free_tx_descs(sc, txq);
   6914 		if (txq->txq_lock)
   6915 			mutex_obj_free(txq->txq_lock);
   6916 	}
   6917 
   6918 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6919 }
   6920 
   6921 static void
   6922 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6923 {
   6924 
   6925 	KASSERT(mutex_owned(txq->txq_lock));
   6926 
   6927 	/* Initialize the transmit descriptor ring. */
   6928 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6929 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6930 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6931 	txq->txq_free = WM_NTXDESC(txq);
   6932 	txq->txq_next = 0;
   6933 }
   6934 
   6935 static void
   6936 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6937     struct wm_txqueue *txq)
   6938 {
   6939 
   6940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6941 		device_xname(sc->sc_dev), __func__));
   6942 	KASSERT(mutex_owned(txq->txq_lock));
   6943 
   6944 	if (sc->sc_type < WM_T_82543) {
   6945 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6946 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6947 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6948 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6949 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6950 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6951 	} else {
   6952 		int qid = wmq->wmq_id;
   6953 
   6954 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6955 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6956 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6957 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6958 
   6959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6960 			/*
   6961 			 * Don't write TDT before TCTL.EN is set.
   6962 			 * See the document.
			 * See the datasheet.
   6964 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6965 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6966 			    | TXDCTL_WTHRESH(0));
   6967 		else {
   6968 			/* XXX should update with AIM? */
   6969 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6970 			if (sc->sc_type >= WM_T_82540) {
   6971 				/* should be same */
   6972 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6973 			}
   6974 
   6975 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6976 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6977 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6978 		}
   6979 	}
   6980 }
   6981 
   6982 static void
   6983 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6984 {
   6985 	int i;
   6986 
   6987 	KASSERT(mutex_owned(txq->txq_lock));
   6988 
   6989 	/* Initialize the transmit job descriptors. */
   6990 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6991 		txq->txq_soft[i].txs_mbuf = NULL;
   6992 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6993 	txq->txq_snext = 0;
   6994 	txq->txq_sdirty = 0;
   6995 }
   6996 
   6997 static void
   6998 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6999     struct wm_txqueue *txq)
   7000 {
   7001 
   7002 	KASSERT(mutex_owned(txq->txq_lock));
   7003 
   7004 	/*
   7005 	 * Set up some register offsets that are different between
   7006 	 * the i82542 and the i82543 and later chips.
   7007 	 */
   7008 	if (sc->sc_type < WM_T_82543)
   7009 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7010 	else
   7011 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7012 
   7013 	wm_init_tx_descs(sc, txq);
   7014 	wm_init_tx_regs(sc, wmq, txq);
   7015 	wm_init_tx_buffer(sc, txq);
   7016 
   7017 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7018 	txq->txq_sending = false;
   7019 }
   7020 
   7021 static void
   7022 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7023     struct wm_rxqueue *rxq)
   7024 {
   7025 
   7026 	KASSERT(mutex_owned(rxq->rxq_lock));
   7027 
   7028 	/*
   7029 	 * Initialize the receive descriptor and receive job
   7030 	 * descriptor rings.
   7031 	 */
   7032 	if (sc->sc_type < WM_T_82543) {
   7033 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7034 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7035 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7036 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7037 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7038 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7040 
   7041 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7042 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7043 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7044 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7045 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7046 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7047 	} else {
   7048 		int qid = wmq->wmq_id;
   7049 
   7050 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7051 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7052 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7053 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7054 
   7055 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7056 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7057 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7058 
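			/*
			 * SRRCTL_BSIZEPKT is expressed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * check above that MCLBYTES is a multiple of that
			 * unit before it is programmed below.
			 */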
    7059 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7060 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7061 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7062 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7063 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7064 			    | RXDCTL_WTHRESH(1));
   7065 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7066 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7067 		} else {
   7068 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7069 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7070 			/* XXX should update with AIM? */
   7071 			CSR_WRITE(sc, WMREG_RDTR,
   7072 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7073 			/* MUST be same */
   7074 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7075 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7076 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7077 		}
   7078 	}
   7079 }
   7080 
   7081 static int
   7082 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7083 {
   7084 	struct wm_rxsoft *rxs;
   7085 	int error, i;
   7086 
   7087 	KASSERT(mutex_owned(rxq->rxq_lock));
   7088 
   7089 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7090 		rxs = &rxq->rxq_soft[i];
   7091 		if (rxs->rxs_mbuf == NULL) {
   7092 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7093 				log(LOG_ERR, "%s: unable to allocate or map "
   7094 				    "rx buffer %d, error = %d\n",
   7095 				    device_xname(sc->sc_dev), i, error);
   7096 				/*
   7097 				 * XXX Should attempt to run with fewer receive
   7098 				 * XXX buffers instead of just failing.
   7099 				 */
   7100 				wm_rxdrain(rxq);
   7101 				return ENOMEM;
   7102 			}
   7103 		} else {
   7104 			/*
   7105 			 * For 82575 and 82576, the RX descriptors must be
   7106 			 * initialized after the setting of RCTL.EN in
   7107 			 * wm_set_filter()
   7108 			 */
   7109 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7110 				wm_init_rxdesc(rxq, i);
   7111 		}
   7112 	}
   7113 	rxq->rxq_ptr = 0;
   7114 	rxq->rxq_discard = 0;
   7115 	WM_RXCHAIN_RESET(rxq);
   7116 
   7117 	return 0;
   7118 }
   7119 
   7120 static int
   7121 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7122     struct wm_rxqueue *rxq)
   7123 {
   7124 
   7125 	KASSERT(mutex_owned(rxq->rxq_lock));
   7126 
   7127 	/*
   7128 	 * Set up some register offsets that are different between
   7129 	 * the i82542 and the i82543 and later chips.
   7130 	 */
   7131 	if (sc->sc_type < WM_T_82543)
   7132 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7133 	else
   7134 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7135 
   7136 	wm_init_rx_regs(sc, wmq, rxq);
   7137 	return wm_init_rx_buffer(sc, rxq);
   7138 }
   7139 
   7140 /*
    7141  * wm_init_txrx_queues:
   7142  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7143  */
   7144 static int
   7145 wm_init_txrx_queues(struct wm_softc *sc)
   7146 {
   7147 	int i, error = 0;
   7148 
   7149 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7150 		device_xname(sc->sc_dev), __func__));
   7151 
   7152 	for (i = 0; i < sc->sc_nqueues; i++) {
   7153 		struct wm_queue *wmq = &sc->sc_queue[i];
   7154 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7155 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7156 
   7157 		/*
   7158 		 * TODO
    7159 		 * Currently, a constant value is used instead of AIM.
    7160 		 * Furthermore, the interrupt interval for multiqueue (which
    7161 		 * uses polling mode) is less than the default value.
    7162 		 * More tuning and AIM support are required.
   7163 		 */
   7164 		if (wm_is_using_multiqueue(sc))
   7165 			wmq->wmq_itr = 50;
   7166 		else
   7167 			wmq->wmq_itr = sc->sc_itr_init;
   7168 		wmq->wmq_set_itr = true;
   7169 
   7170 		mutex_enter(txq->txq_lock);
   7171 		wm_init_tx_queue(sc, wmq, txq);
   7172 		mutex_exit(txq->txq_lock);
   7173 
   7174 		mutex_enter(rxq->rxq_lock);
   7175 		error = wm_init_rx_queue(sc, wmq, rxq);
   7176 		mutex_exit(rxq->rxq_lock);
   7177 		if (error)
   7178 			break;
   7179 	}
   7180 
   7181 	return error;
   7182 }
   7183 
   7184 /*
   7185  * wm_tx_offload:
   7186  *
   7187  *	Set up TCP/IP checksumming parameters for the
   7188  *	specified packet.
   7189  */
   7190 static int
   7191 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7192     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7193 {
   7194 	struct mbuf *m0 = txs->txs_mbuf;
   7195 	struct livengood_tcpip_ctxdesc *t;
   7196 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7197 	uint32_t ipcse;
   7198 	struct ether_header *eh;
   7199 	int offset, iphl;
   7200 	uint8_t fields;
   7201 
   7202 	/*
   7203 	 * XXX It would be nice if the mbuf pkthdr had offset
   7204 	 * fields for the protocol headers.
   7205 	 */
   7206 
   7207 	eh = mtod(m0, struct ether_header *);
   7208 	switch (htons(eh->ether_type)) {
   7209 	case ETHERTYPE_IP:
   7210 	case ETHERTYPE_IPV6:
   7211 		offset = ETHER_HDR_LEN;
   7212 		break;
   7213 
   7214 	case ETHERTYPE_VLAN:
   7215 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7216 		break;
   7217 
   7218 	default:
   7219 		/*
   7220 		 * Don't support this protocol or encapsulation.
   7221 		 */
   7222 		*fieldsp = 0;
   7223 		*cmdp = 0;
   7224 		return 0;
   7225 	}
   7226 
   7227 	if ((m0->m_pkthdr.csum_flags &
   7228 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7229 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7230 	} else
   7231 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7232 
   7233 	ipcse = offset + iphl - 1;
   7234 
   7235 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7236 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7237 	seg = 0;
   7238 	fields = 0;
   7239 
   7240 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7241 		int hlen = offset + iphl;
   7242 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7243 
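		/*
		 * For TSO the IP header's length field must be cleared and
		 * the TCP checksum pre-seeded with the pseudo-header sum
		 * computed without the length, as done below; the hardware
		 * then fills in both for each segment it generates.
		 */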
   7244 		if (__predict_false(m0->m_len <
   7245 				    (hlen + sizeof(struct tcphdr)))) {
   7246 			/*
   7247 			 * TCP/IP headers are not in the first mbuf; we need
   7248 			 * to do this the slow and painful way. Let's just
   7249 			 * hope this doesn't happen very often.
   7250 			 */
   7251 			struct tcphdr th;
   7252 
   7253 			WM_Q_EVCNT_INCR(txq, tsopain);
   7254 
   7255 			m_copydata(m0, hlen, sizeof(th), &th);
   7256 			if (v4) {
   7257 				struct ip ip;
   7258 
   7259 				m_copydata(m0, offset, sizeof(ip), &ip);
   7260 				ip.ip_len = 0;
   7261 				m_copyback(m0,
   7262 				    offset + offsetof(struct ip, ip_len),
   7263 				    sizeof(ip.ip_len), &ip.ip_len);
   7264 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7265 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7266 			} else {
   7267 				struct ip6_hdr ip6;
   7268 
   7269 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7270 				ip6.ip6_plen = 0;
   7271 				m_copyback(m0,
   7272 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7273 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7274 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7275 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7276 			}
   7277 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7278 			    sizeof(th.th_sum), &th.th_sum);
   7279 
   7280 			hlen += th.th_off << 2;
   7281 		} else {
   7282 			/*
   7283 			 * TCP/IP headers are in the first mbuf; we can do
   7284 			 * this the easy way.
   7285 			 */
   7286 			struct tcphdr *th;
   7287 
   7288 			if (v4) {
   7289 				struct ip *ip =
   7290 				    (void *)(mtod(m0, char *) + offset);
   7291 				th = (void *)(mtod(m0, char *) + hlen);
   7292 
   7293 				ip->ip_len = 0;
   7294 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7295 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7296 			} else {
   7297 				struct ip6_hdr *ip6 =
   7298 				    (void *)(mtod(m0, char *) + offset);
   7299 				th = (void *)(mtod(m0, char *) + hlen);
   7300 
   7301 				ip6->ip6_plen = 0;
   7302 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7303 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7304 			}
   7305 			hlen += th->th_off << 2;
   7306 		}
   7307 
   7308 		if (v4) {
   7309 			WM_Q_EVCNT_INCR(txq, tso);
   7310 			cmdlen |= WTX_TCPIP_CMD_IP;
   7311 		} else {
   7312 			WM_Q_EVCNT_INCR(txq, tso6);
   7313 			ipcse = 0;
   7314 		}
   7315 		cmd |= WTX_TCPIP_CMD_TSE;
   7316 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7317 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7318 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7319 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7320 	}
   7321 
   7322 	/*
   7323 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7324 	 * offload feature, if we load the context descriptor, we
   7325 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7326 	 */
   7327 
   7328 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7329 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7330 	    WTX_TCPIP_IPCSE(ipcse);
   7331 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7332 		WM_Q_EVCNT_INCR(txq, ipsum);
   7333 		fields |= WTX_IXSM;
   7334 	}
   7335 
   7336 	offset += iphl;
   7337 
   7338 	if (m0->m_pkthdr.csum_flags &
   7339 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7340 		WM_Q_EVCNT_INCR(txq, tusum);
   7341 		fields |= WTX_TXSM;
   7342 		tucs = WTX_TCPIP_TUCSS(offset) |
   7343 		    WTX_TCPIP_TUCSO(offset +
   7344 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7345 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7346 	} else if ((m0->m_pkthdr.csum_flags &
   7347 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7348 		WM_Q_EVCNT_INCR(txq, tusum6);
   7349 		fields |= WTX_TXSM;
   7350 		tucs = WTX_TCPIP_TUCSS(offset) |
   7351 		    WTX_TCPIP_TUCSO(offset +
   7352 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7353 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7354 	} else {
   7355 		/* Just initialize it to a valid TCP context. */
   7356 		tucs = WTX_TCPIP_TUCSS(offset) |
   7357 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7358 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7359 	}
   7360 
   7361 	/*
    7362 	 * We don't have to write a context descriptor for every packet,
    7363 	 * except on the 82574: when two descriptor queues are used, the
    7364 	 * 82574 requires a context descriptor for every packet.
    7365 	 * Writing a context descriptor for every packet adds overhead,
    7366 	 * but it does not cause problems.
   7367 	 */
   7368 	/* Fill in the context descriptor. */
   7369 	t = (struct livengood_tcpip_ctxdesc *)
   7370 	    &txq->txq_descs[txq->txq_next];
   7371 	t->tcpip_ipcs = htole32(ipcs);
   7372 	t->tcpip_tucs = htole32(tucs);
   7373 	t->tcpip_cmdlen = htole32(cmdlen);
   7374 	t->tcpip_seg = htole32(seg);
   7375 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7376 
   7377 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7378 	txs->txs_ndesc++;
   7379 
   7380 	*cmdp = cmd;
   7381 	*fieldsp = fields;
   7382 
   7383 	return 0;
   7384 }
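
/*
 * Worked example for wm_tx_offload() (illustrative values only): for an
 * untagged IPv4 TCP packet with a 20-byte IP header and M_CSUM_TCPv4 set,
 * offset = ETHER_HDR_LEN = 14 and iphl = 20, so the context descriptor
 * is built with:
 *
 *	IPCSS = 14	(IP checksum starts right after the Ethernet header)
 *	IPCSO = 24	(14 + offsetof(struct ip, ip_sum) = 14 + 10)
 *	IPCSE = 33	(14 + 20 - 1, last byte covered by the IP checksum)
 *	TUCSS = 34	(start of the TCP header)
 *	TUCSO = 50	(34 + offsetof(struct tcphdr, th_sum) = 34 + 16)
 *	TUCSE = 0	(checksum runs to the end of the packet)
 */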
   7385 
   7386 static inline int
   7387 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7388 {
   7389 	struct wm_softc *sc = ifp->if_softc;
   7390 	u_int cpuid = cpu_index(curcpu());
   7391 
   7392 	/*
    7393 	 * Currently, a simple CPU-based distribution strategy.
    7394 	 * TODO:
    7395 	 * Distribute by flowid (RSS hash value).
   7396 	 */
   7397 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7398 }
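
#if 0
/*
 * Illustrative sketch (not part of the driver): standalone model of the
 * queue-selection arithmetic above, with example values. Adding ncpu
 * before subtracting the affinity offset keeps the dividend non-negative.
 */
#include <stdio.h>

static unsigned int
select_txqueue(unsigned int cpuid, unsigned int ncpu,
    unsigned int affinity_offset, unsigned int nqueues)
{

	return ((cpuid + ncpu - affinity_offset) % ncpu) % nqueues;
}

int
main(void)
{

	/* With 8 CPUs, affinity offset 2 and 4 queues, CPU 5 -> queue 3. */
	printf("queue = %u\n", select_txqueue(5, 8, 2, 4));
	return 0;
}
#endif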
   7399 
   7400 /*
   7401  * wm_start:		[ifnet interface function]
   7402  *
   7403  *	Start packet transmission on the interface.
   7404  */
   7405 static void
   7406 wm_start(struct ifnet *ifp)
   7407 {
   7408 	struct wm_softc *sc = ifp->if_softc;
   7409 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7410 
   7411 #ifdef WM_MPSAFE
   7412 	KASSERT(if_is_mpsafe(ifp));
   7413 #endif
   7414 	/*
   7415 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7416 	 */
   7417 
   7418 	mutex_enter(txq->txq_lock);
   7419 	if (!txq->txq_stopping)
   7420 		wm_start_locked(ifp);
   7421 	mutex_exit(txq->txq_lock);
   7422 }
   7423 
   7424 static void
   7425 wm_start_locked(struct ifnet *ifp)
   7426 {
   7427 	struct wm_softc *sc = ifp->if_softc;
   7428 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7429 
   7430 	wm_send_common_locked(ifp, txq, false);
   7431 }
   7432 
   7433 static int
   7434 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7435 {
   7436 	int qid;
   7437 	struct wm_softc *sc = ifp->if_softc;
   7438 	struct wm_txqueue *txq;
   7439 
   7440 	qid = wm_select_txqueue(ifp, m);
   7441 	txq = &sc->sc_queue[qid].wmq_txq;
   7442 
   7443 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7444 		m_freem(m);
   7445 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7446 		return ENOBUFS;
   7447 	}
   7448 
   7449 	/*
   7450 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7451 	 */
   7452 	ifp->if_obytes += m->m_pkthdr.len;
   7453 	if (m->m_flags & M_MCAST)
   7454 		ifp->if_omcasts++;
   7455 
   7456 	if (mutex_tryenter(txq->txq_lock)) {
   7457 		if (!txq->txq_stopping)
   7458 			wm_transmit_locked(ifp, txq);
   7459 		mutex_exit(txq->txq_lock);
   7460 	}
   7461 
   7462 	return 0;
   7463 }
   7464 
   7465 static void
   7466 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7467 {
   7468 
   7469 	wm_send_common_locked(ifp, txq, true);
   7470 }
   7471 
   7472 static void
   7473 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7474     bool is_transmit)
   7475 {
   7476 	struct wm_softc *sc = ifp->if_softc;
   7477 	struct mbuf *m0;
   7478 	struct wm_txsoft *txs;
   7479 	bus_dmamap_t dmamap;
   7480 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7481 	bus_addr_t curaddr;
   7482 	bus_size_t seglen, curlen;
   7483 	uint32_t cksumcmd;
   7484 	uint8_t cksumfields;
   7485 	bool remap = true;
   7486 
   7487 	KASSERT(mutex_owned(txq->txq_lock));
   7488 
   7489 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7490 		return;
   7491 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7492 		return;
   7493 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7494 		return;
   7495 
   7496 	/* Remember the previous number of free descriptors. */
   7497 	ofree = txq->txq_free;
   7498 
   7499 	/*
   7500 	 * Loop through the send queue, setting up transmit descriptors
   7501 	 * until we drain the queue, or use up all available transmit
   7502 	 * descriptors.
   7503 	 */
   7504 	for (;;) {
   7505 		m0 = NULL;
   7506 
   7507 		/* Get a work queue entry. */
   7508 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7509 			wm_txeof(txq, UINT_MAX);
   7510 			if (txq->txq_sfree == 0) {
   7511 				DPRINTF(WM_DEBUG_TX,
   7512 				    ("%s: TX: no free job descriptors\n",
   7513 					device_xname(sc->sc_dev)));
   7514 				WM_Q_EVCNT_INCR(txq, txsstall);
   7515 				break;
   7516 			}
   7517 		}
   7518 
   7519 		/* Grab a packet off the queue. */
   7520 		if (is_transmit)
   7521 			m0 = pcq_get(txq->txq_interq);
   7522 		else
   7523 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7524 		if (m0 == NULL)
   7525 			break;
   7526 
   7527 		DPRINTF(WM_DEBUG_TX,
   7528 		    ("%s: TX: have packet to transmit: %p\n",
   7529 			device_xname(sc->sc_dev), m0));
   7530 
   7531 		txs = &txq->txq_soft[txq->txq_snext];
   7532 		dmamap = txs->txs_dmamap;
   7533 
   7534 		use_tso = (m0->m_pkthdr.csum_flags &
   7535 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7536 
   7537 		/*
   7538 		 * So says the Linux driver:
   7539 		 * The controller does a simple calculation to make sure
   7540 		 * there is enough room in the FIFO before initiating the
   7541 		 * DMA for each buffer. The calc is:
   7542 		 *	4 = ceil(buffer len / MSS)
   7543 		 * To make sure we don't overrun the FIFO, adjust the max
   7544 		 * buffer len if the MSS drops.
   7545 		 */
   7546 		dmamap->dm_maxsegsz =
   7547 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7548 		    ? m0->m_pkthdr.segsz << 2
   7549 		    : WTX_MAX_LEN;
   7550 
   7551 		/*
   7552 		 * Load the DMA map.  If this fails, the packet either
   7553 		 * didn't fit in the allotted number of segments, or we
   7554 		 * were short on resources.  For the too-many-segments
   7555 		 * case, we simply report an error and drop the packet,
   7556 		 * since we can't sanely copy a jumbo packet to a single
   7557 		 * buffer.
   7558 		 */
   7559 retry:
   7560 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7561 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7562 		if (__predict_false(error)) {
   7563 			if (error == EFBIG) {
   7564 				if (remap == true) {
   7565 					struct mbuf *m;
   7566 
   7567 					remap = false;
   7568 					m = m_defrag(m0, M_NOWAIT);
   7569 					if (m != NULL) {
   7570 						WM_Q_EVCNT_INCR(txq, defrag);
   7571 						m0 = m;
   7572 						goto retry;
   7573 					}
   7574 				}
   7575 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7576 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7577 				    "DMA segments, dropping...\n",
   7578 				    device_xname(sc->sc_dev));
   7579 				wm_dump_mbuf_chain(sc, m0);
   7580 				m_freem(m0);
   7581 				continue;
   7582 			}
   7583 			/*  Short on resources, just stop for now. */
   7584 			DPRINTF(WM_DEBUG_TX,
   7585 			    ("%s: TX: dmamap load failed: %d\n",
   7586 				device_xname(sc->sc_dev), error));
   7587 			break;
   7588 		}
   7589 
   7590 		segs_needed = dmamap->dm_nsegs;
   7591 		if (use_tso) {
   7592 			/* For sentinel descriptor; see below. */
   7593 			segs_needed++;
   7594 		}
   7595 
   7596 		/*
   7597 		 * Ensure we have enough descriptors free to describe
   7598 		 * the packet. Note, we always reserve one descriptor
   7599 		 * at the end of the ring due to the semantics of the
   7600 		 * TDT register, plus one more in the event we need
   7601 		 * to load offload context.
   7602 		 */
   7603 		if (segs_needed > txq->txq_free - 2) {
   7604 			/*
   7605 			 * Not enough free descriptors to transmit this
   7606 			 * packet.  We haven't committed anything yet,
   7607 			 * so just unload the DMA map, put the packet
   7608 			 * pack on the queue, and punt. Notify the upper
			 * back on the queue, and punt. Notify the upper
   7610 			 */
   7611 			DPRINTF(WM_DEBUG_TX,
   7612 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7613 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7614 				segs_needed, txq->txq_free - 1));
   7615 			if (!is_transmit)
   7616 				ifp->if_flags |= IFF_OACTIVE;
   7617 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7618 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7619 			WM_Q_EVCNT_INCR(txq, txdstall);
   7620 			break;
   7621 		}
   7622 
   7623 		/*
   7624 		 * Check for 82547 Tx FIFO bug. We need to do this
   7625 		 * once we know we can transmit the packet, since we
   7626 		 * do some internal FIFO space accounting here.
   7627 		 */
   7628 		if (sc->sc_type == WM_T_82547 &&
   7629 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7630 			DPRINTF(WM_DEBUG_TX,
   7631 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7632 				device_xname(sc->sc_dev)));
   7633 			if (!is_transmit)
   7634 				ifp->if_flags |= IFF_OACTIVE;
   7635 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7636 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7637 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7638 			break;
   7639 		}
   7640 
   7641 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7642 
   7643 		DPRINTF(WM_DEBUG_TX,
   7644 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7645 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7646 
   7647 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7648 
   7649 		/*
   7650 		 * Store a pointer to the packet so that we can free it
   7651 		 * later.
   7652 		 *
   7653 		 * Initially, we consider the number of descriptors the
   7654 		 * packet uses the number of DMA segments.  This may be
   7655 		 * incremented by 1 if we do checksum offload (a descriptor
   7656 		 * is used to set the checksum context).
   7657 		 */
   7658 		txs->txs_mbuf = m0;
   7659 		txs->txs_firstdesc = txq->txq_next;
   7660 		txs->txs_ndesc = segs_needed;
   7661 
   7662 		/* Set up offload parameters for this packet. */
   7663 		if (m0->m_pkthdr.csum_flags &
   7664 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7665 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7666 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7667 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7668 					  &cksumfields) != 0) {
   7669 				/* Error message already displayed. */
   7670 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7671 				continue;
   7672 			}
   7673 		} else {
   7674 			cksumcmd = 0;
   7675 			cksumfields = 0;
   7676 		}
   7677 
   7678 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7679 
   7680 		/* Sync the DMA map. */
   7681 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7682 		    BUS_DMASYNC_PREWRITE);
   7683 
   7684 		/* Initialize the transmit descriptor. */
   7685 		for (nexttx = txq->txq_next, seg = 0;
   7686 		     seg < dmamap->dm_nsegs; seg++) {
   7687 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7688 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7689 			     seglen != 0;
   7690 			     curaddr += curlen, seglen -= curlen,
   7691 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7692 				curlen = seglen;
   7693 
   7694 				/*
   7695 				 * So says the Linux driver:
   7696 				 * Work around for premature descriptor
   7697 				 * write-backs in TSO mode.  Append a
   7698 				 * 4-byte sentinel descriptor.
   7699 				 */
   7700 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7701 				    curlen > 8)
   7702 					curlen -= 4;
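				/*
				 * The 4 bytes trimmed here remain in seglen,
				 * so the loop emits one extra 4-byte
				 * (sentinel) descriptor for them; this is
				 * what segs_needed was incremented for in
				 * the TSO case above.
				 */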
   7703 
   7704 				wm_set_dma_addr(
   7705 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7706 				txq->txq_descs[nexttx].wtx_cmdlen
   7707 				    = htole32(cksumcmd | curlen);
   7708 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7709 				    = 0;
   7710 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7711 				    = cksumfields;
   7712 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7713 				lasttx = nexttx;
   7714 
   7715 				DPRINTF(WM_DEBUG_TX,
   7716 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7717 					"len %#04zx\n",
   7718 					device_xname(sc->sc_dev), nexttx,
   7719 					(uint64_t)curaddr, curlen));
   7720 			}
   7721 		}
   7722 
   7723 		KASSERT(lasttx != -1);
   7724 
   7725 		/*
   7726 		 * Set up the command byte on the last descriptor of
   7727 		 * the packet. If we're in the interrupt delay window,
   7728 		 * delay the interrupt.
   7729 		 */
   7730 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7731 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7732 
   7733 		/*
   7734 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7735 		 * up the descriptor to encapsulate the packet for us.
   7736 		 *
   7737 		 * This is only valid on the last descriptor of the packet.
   7738 		 */
   7739 		if (vlan_has_tag(m0)) {
   7740 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7741 			    htole32(WTX_CMD_VLE);
   7742 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7743 			    = htole16(vlan_get_tag(m0));
   7744 		}
   7745 
   7746 		txs->txs_lastdesc = lasttx;
   7747 
   7748 		DPRINTF(WM_DEBUG_TX,
   7749 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7750 			device_xname(sc->sc_dev),
   7751 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7752 
   7753 		/* Sync the descriptors we're using. */
   7754 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7755 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7756 
   7757 		/* Give the packet to the chip. */
   7758 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7759 
   7760 		DPRINTF(WM_DEBUG_TX,
   7761 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7762 
   7763 		DPRINTF(WM_DEBUG_TX,
   7764 		    ("%s: TX: finished transmitting packet, job %d\n",
   7765 			device_xname(sc->sc_dev), txq->txq_snext));
   7766 
   7767 		/* Advance the tx pointer. */
   7768 		txq->txq_free -= txs->txs_ndesc;
   7769 		txq->txq_next = nexttx;
   7770 
   7771 		txq->txq_sfree--;
   7772 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7773 
   7774 		/* Pass the packet to any BPF listeners. */
   7775 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7776 	}
   7777 
   7778 	if (m0 != NULL) {
   7779 		if (!is_transmit)
   7780 			ifp->if_flags |= IFF_OACTIVE;
   7781 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7782 		WM_Q_EVCNT_INCR(txq, descdrop);
   7783 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7784 			__func__));
   7785 		m_freem(m0);
   7786 	}
   7787 
   7788 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7789 		/* No more slots; notify upper layer. */
   7790 		if (!is_transmit)
   7791 			ifp->if_flags |= IFF_OACTIVE;
   7792 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7793 	}
   7794 
   7795 	if (txq->txq_free != ofree) {
   7796 		/* Set a watchdog timer in case the chip flakes out. */
   7797 		txq->txq_lastsent = time_uptime;
   7798 		txq->txq_sending = true;
   7799 	}
   7800 }
   7801 
   7802 /*
   7803  * wm_nq_tx_offload:
   7804  *
   7805  *	Set up TCP/IP checksumming parameters for the
   7806  *	specified packet, for NEWQUEUE devices
   7807  */
   7808 static int
   7809 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7810     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7811 {
   7812 	struct mbuf *m0 = txs->txs_mbuf;
   7813 	uint32_t vl_len, mssidx, cmdc;
   7814 	struct ether_header *eh;
   7815 	int offset, iphl;
   7816 
   7817 	/*
   7818 	 * XXX It would be nice if the mbuf pkthdr had offset
   7819 	 * fields for the protocol headers.
   7820 	 */
   7821 	*cmdlenp = 0;
   7822 	*fieldsp = 0;
   7823 
   7824 	eh = mtod(m0, struct ether_header *);
   7825 	switch (htons(eh->ether_type)) {
   7826 	case ETHERTYPE_IP:
   7827 	case ETHERTYPE_IPV6:
   7828 		offset = ETHER_HDR_LEN;
   7829 		break;
   7830 
   7831 	case ETHERTYPE_VLAN:
   7832 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7833 		break;
   7834 
   7835 	default:
   7836 		/* Don't support this protocol or encapsulation. */
   7837 		*do_csum = false;
   7838 		return 0;
   7839 	}
   7840 	*do_csum = true;
   7841 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7842 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7843 
   7844 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7845 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7846 
   7847 	if ((m0->m_pkthdr.csum_flags &
   7848 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7849 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7850 	} else {
   7851 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7852 	}
   7853 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7854 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7855 
   7856 	if (vlan_has_tag(m0)) {
   7857 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7858 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7859 		*cmdlenp |= NQTX_CMD_VLE;
   7860 	}
   7861 
   7862 	mssidx = 0;
   7863 
   7864 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7865 		int hlen = offset + iphl;
   7866 		int tcp_hlen;
   7867 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7868 
   7869 		if (__predict_false(m0->m_len <
   7870 				    (hlen + sizeof(struct tcphdr)))) {
   7871 			/*
   7872 			 * TCP/IP headers are not in the first mbuf; we need
   7873 			 * to do this the slow and painful way. Let's just
   7874 			 * hope this doesn't happen very often.
   7875 			 */
   7876 			struct tcphdr th;
   7877 
   7878 			WM_Q_EVCNT_INCR(txq, tsopain);
   7879 
   7880 			m_copydata(m0, hlen, sizeof(th), &th);
   7881 			if (v4) {
   7882 				struct ip ip;
   7883 
   7884 				m_copydata(m0, offset, sizeof(ip), &ip);
   7885 				ip.ip_len = 0;
   7886 				m_copyback(m0,
   7887 				    offset + offsetof(struct ip, ip_len),
   7888 				    sizeof(ip.ip_len), &ip.ip_len);
   7889 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7890 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7891 			} else {
   7892 				struct ip6_hdr ip6;
   7893 
   7894 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7895 				ip6.ip6_plen = 0;
   7896 				m_copyback(m0,
   7897 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7898 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7899 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7900 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7901 			}
   7902 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7903 			    sizeof(th.th_sum), &th.th_sum);
   7904 
   7905 			tcp_hlen = th.th_off << 2;
   7906 		} else {
   7907 			/*
   7908 			 * TCP/IP headers are in the first mbuf; we can do
   7909 			 * this the easy way.
   7910 			 */
   7911 			struct tcphdr *th;
   7912 
   7913 			if (v4) {
   7914 				struct ip *ip =
   7915 				    (void *)(mtod(m0, char *) + offset);
   7916 				th = (void *)(mtod(m0, char *) + hlen);
   7917 
   7918 				ip->ip_len = 0;
   7919 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7920 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7921 			} else {
   7922 				struct ip6_hdr *ip6 =
   7923 				    (void *)(mtod(m0, char *) + offset);
   7924 				th = (void *)(mtod(m0, char *) + hlen);
   7925 
   7926 				ip6->ip6_plen = 0;
   7927 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7928 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7929 			}
   7930 			tcp_hlen = th->th_off << 2;
   7931 		}
   7932 		hlen += tcp_hlen;
   7933 		*cmdlenp |= NQTX_CMD_TSE;
   7934 
   7935 		if (v4) {
   7936 			WM_Q_EVCNT_INCR(txq, tso);
   7937 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7938 		} else {
   7939 			WM_Q_EVCNT_INCR(txq, tso6);
   7940 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7941 		}
   7942 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7943 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7944 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7945 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7946 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7947 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7948 	} else {
   7949 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7950 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7951 	}
   7952 
   7953 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7954 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7955 		cmdc |= NQTXC_CMD_IP4;
   7956 	}
   7957 
   7958 	if (m0->m_pkthdr.csum_flags &
   7959 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7960 		WM_Q_EVCNT_INCR(txq, tusum);
   7961 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7962 			cmdc |= NQTXC_CMD_TCP;
   7963 		else
   7964 			cmdc |= NQTXC_CMD_UDP;
   7965 
   7966 		cmdc |= NQTXC_CMD_IP4;
   7967 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7968 	}
   7969 	if (m0->m_pkthdr.csum_flags &
   7970 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7971 		WM_Q_EVCNT_INCR(txq, tusum6);
   7972 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7973 			cmdc |= NQTXC_CMD_TCP;
   7974 		else
   7975 			cmdc |= NQTXC_CMD_UDP;
   7976 
   7977 		cmdc |= NQTXC_CMD_IP6;
   7978 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7979 	}
   7980 
   7981 	/*
    7982 	 * We don't have to write a context descriptor for every packet on
    7983 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7984 	 * I210 and I211. For these controllers it is enough to write one
    7985 	 * context descriptor per Tx queue.
    7986 	 * Writing a context descriptor for every packet adds overhead,
    7987 	 * but it does not cause problems.
   7988 	 */
   7989 	/* Fill in the context descriptor. */
   7990 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7991 	    htole32(vl_len);
   7992 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7993 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7994 	    htole32(cmdc);
   7995 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7996 	    htole32(mssidx);
   7997 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7998 	DPRINTF(WM_DEBUG_TX,
   7999 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8000 		txq->txq_next, 0, vl_len));
   8001 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8002 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8003 	txs->txs_ndesc++;
   8004 	return 0;
   8005 }
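         /*
          * Illustrative sketch (not driver code): for a plain IPv4/TCP
          * checksum-offload frame with the standard 14-byte Ethernet header
          * and a 20-byte IP header, the context fields built above work
          * out to:
          *
          *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
          *	cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT
          *	       | NQTXC_CMD_IP4 | NQTXC_CMD_TCP;
          *
          * For TSO, mssidx additionally packs the MSS and the TCP header
          * length, as in the TSO branch above.
          */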
   8006 
   8007 /*
   8008  * wm_nq_start:		[ifnet interface function]
   8009  *
   8010  *	Start packet transmission on the interface for NEWQUEUE devices
   8011  */
   8012 static void
   8013 wm_nq_start(struct ifnet *ifp)
   8014 {
   8015 	struct wm_softc *sc = ifp->if_softc;
   8016 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8017 
   8018 #ifdef WM_MPSAFE
   8019 	KASSERT(if_is_mpsafe(ifp));
   8020 #endif
   8021 	/*
   8022 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8023 	 */
   8024 
   8025 	mutex_enter(txq->txq_lock);
   8026 	if (!txq->txq_stopping)
   8027 		wm_nq_start_locked(ifp);
   8028 	mutex_exit(txq->txq_lock);
   8029 }
   8030 
   8031 static void
   8032 wm_nq_start_locked(struct ifnet *ifp)
   8033 {
   8034 	struct wm_softc *sc = ifp->if_softc;
   8035 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8036 
   8037 	wm_nq_send_common_locked(ifp, txq, false);
   8038 }
   8039 
   8040 static int
   8041 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8042 {
   8043 	int qid;
   8044 	struct wm_softc *sc = ifp->if_softc;
   8045 	struct wm_txqueue *txq;
   8046 
   8047 	qid = wm_select_txqueue(ifp, m);
   8048 	txq = &sc->sc_queue[qid].wmq_txq;
   8049 
   8050 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8051 		m_freem(m);
   8052 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8053 		return ENOBUFS;
   8054 	}
   8055 
   8056 	/*
   8057 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8058 	 */
   8059 	ifp->if_obytes += m->m_pkthdr.len;
   8060 	if (m->m_flags & M_MCAST)
   8061 		ifp->if_omcasts++;
   8062 
    8063 	/*
    8064 	 * There are two situations in which this mutex_tryenter() can
    8065 	 * fail at run time:
    8066 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8067 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8068 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8069 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8070 	 * In case (2), the last packet enqueued to txq->txq_interq is
    8071 	 * also dequeued by wm_deferred_start_locked(), so it does not get
    8072 	 * stuck, either.
    8073 	 */
   8074 	if (mutex_tryenter(txq->txq_lock)) {
   8075 		if (!txq->txq_stopping)
   8076 			wm_nq_transmit_locked(ifp, txq);
   8077 		mutex_exit(txq->txq_lock);
   8078 	}
   8079 
   8080 	return 0;
   8081 }
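         /*
          * A sketch of the if_transmit path as implemented above:
          * wm_nq_transmit() maps the mbuf to a queue with
          * wm_select_txqueue(), enqueues it on that queue's pcq, and drains
          * the pcq under txq_lock when the lock is available; otherwise the
          * packet is picked up later by wm_deferred_start_locked() from the
          * interrupt or softint path, as the comment above explains.
          */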
   8082 
   8083 static void
   8084 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8085 {
   8086 
   8087 	wm_nq_send_common_locked(ifp, txq, true);
   8088 }
   8089 
   8090 static void
   8091 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8092     bool is_transmit)
   8093 {
   8094 	struct wm_softc *sc = ifp->if_softc;
   8095 	struct mbuf *m0;
   8096 	struct wm_txsoft *txs;
   8097 	bus_dmamap_t dmamap;
   8098 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8099 	bool do_csum, sent;
   8100 	bool remap = true;
   8101 
   8102 	KASSERT(mutex_owned(txq->txq_lock));
   8103 
   8104 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8105 		return;
   8106 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8107 		return;
   8108 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8109 		return;
   8110 
   8111 	sent = false;
   8112 
   8113 	/*
   8114 	 * Loop through the send queue, setting up transmit descriptors
   8115 	 * until we drain the queue, or use up all available transmit
   8116 	 * descriptors.
   8117 	 */
   8118 	for (;;) {
   8119 		m0 = NULL;
   8120 
   8121 		/* Get a work queue entry. */
   8122 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8123 			wm_txeof(txq, UINT_MAX);
   8124 			if (txq->txq_sfree == 0) {
   8125 				DPRINTF(WM_DEBUG_TX,
   8126 				    ("%s: TX: no free job descriptors\n",
   8127 					device_xname(sc->sc_dev)));
   8128 				WM_Q_EVCNT_INCR(txq, txsstall);
   8129 				break;
   8130 			}
   8131 		}
   8132 
   8133 		/* Grab a packet off the queue. */
   8134 		if (is_transmit)
   8135 			m0 = pcq_get(txq->txq_interq);
   8136 		else
   8137 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8138 		if (m0 == NULL)
   8139 			break;
   8140 
   8141 		DPRINTF(WM_DEBUG_TX,
   8142 		    ("%s: TX: have packet to transmit: %p\n",
   8143 		    device_xname(sc->sc_dev), m0));
   8144 
   8145 		txs = &txq->txq_soft[txq->txq_snext];
   8146 		dmamap = txs->txs_dmamap;
   8147 
   8148 		/*
   8149 		 * Load the DMA map.  If this fails, the packet either
   8150 		 * didn't fit in the allotted number of segments, or we
   8151 		 * were short on resources.  For the too-many-segments
   8152 		 * case, we simply report an error and drop the packet,
   8153 		 * since we can't sanely copy a jumbo packet to a single
   8154 		 * buffer.
   8155 		 */
   8156 retry:
   8157 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8158 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8159 		if (__predict_false(error)) {
   8160 			if (error == EFBIG) {
   8161 				if (remap == true) {
   8162 					struct mbuf *m;
   8163 
   8164 					remap = false;
   8165 					m = m_defrag(m0, M_NOWAIT);
   8166 					if (m != NULL) {
   8167 						WM_Q_EVCNT_INCR(txq, defrag);
   8168 						m0 = m;
   8169 						goto retry;
   8170 					}
   8171 				}
   8172 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8173 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8174 				    "DMA segments, dropping...\n",
   8175 				    device_xname(sc->sc_dev));
   8176 				wm_dump_mbuf_chain(sc, m0);
   8177 				m_freem(m0);
   8178 				continue;
   8179 			}
   8180 			/* Short on resources, just stop for now. */
   8181 			DPRINTF(WM_DEBUG_TX,
   8182 			    ("%s: TX: dmamap load failed: %d\n",
   8183 				device_xname(sc->sc_dev), error));
   8184 			break;
   8185 		}
   8186 
   8187 		segs_needed = dmamap->dm_nsegs;
   8188 
   8189 		/*
   8190 		 * Ensure we have enough descriptors free to describe
   8191 		 * the packet. Note, we always reserve one descriptor
   8192 		 * at the end of the ring due to the semantics of the
   8193 		 * TDT register, plus one more in the event we need
   8194 		 * to load offload context.
   8195 		 */
   8196 		if (segs_needed > txq->txq_free - 2) {
   8197 			/*
   8198 			 * Not enough free descriptors to transmit this
   8199 			 * packet.  We haven't committed anything yet,
    8200 			 * so just unload the DMA map, put the packet
    8201 			 * back on the queue, and punt. Notify the upper
   8202 			 * layer that there are no more slots left.
   8203 			 */
   8204 			DPRINTF(WM_DEBUG_TX,
   8205 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8206 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8207 				segs_needed, txq->txq_free - 1));
   8208 			if (!is_transmit)
   8209 				ifp->if_flags |= IFF_OACTIVE;
   8210 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8211 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8212 			WM_Q_EVCNT_INCR(txq, txdstall);
   8213 			break;
   8214 		}
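         		/*
         		 * Worked example (illustration only): with
         		 * txq_free == 8, a packet mapped to 7 DMA segments is
         		 * deferred, since 7 > 8 - 2: one descriptor stays
         		 * reserved for the TDT semantics and one for a
         		 * possible context descriptor.
         		 */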
   8215 
   8216 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8217 
   8218 		DPRINTF(WM_DEBUG_TX,
   8219 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8220 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8221 
   8222 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8223 
   8224 		/*
   8225 		 * Store a pointer to the packet so that we can free it
   8226 		 * later.
   8227 		 *
    8228 		 * Initially, we take the number of descriptors the packet
    8229 		 * uses to be the number of DMA segments.  This may be
   8230 		 * incremented by 1 if we do checksum offload (a descriptor
   8231 		 * is used to set the checksum context).
   8232 		 */
   8233 		txs->txs_mbuf = m0;
   8234 		txs->txs_firstdesc = txq->txq_next;
   8235 		txs->txs_ndesc = segs_needed;
   8236 
   8237 		/* Set up offload parameters for this packet. */
   8238 		uint32_t cmdlen, fields, dcmdlen;
   8239 		if (m0->m_pkthdr.csum_flags &
   8240 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8241 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8242 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8243 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8244 			    &do_csum) != 0) {
   8245 				/* Error message already displayed. */
   8246 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8247 				continue;
   8248 			}
   8249 		} else {
   8250 			do_csum = false;
   8251 			cmdlen = 0;
   8252 			fields = 0;
   8253 		}
   8254 
   8255 		/* Sync the DMA map. */
   8256 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8257 		    BUS_DMASYNC_PREWRITE);
   8258 
   8259 		/* Initialize the first transmit descriptor. */
   8260 		nexttx = txq->txq_next;
   8261 		if (!do_csum) {
   8262 			/* setup a legacy descriptor */
   8263 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8264 			    dmamap->dm_segs[0].ds_addr);
   8265 			txq->txq_descs[nexttx].wtx_cmdlen =
   8266 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8267 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8268 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8269 			if (vlan_has_tag(m0)) {
   8270 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8271 				    htole32(WTX_CMD_VLE);
   8272 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8273 				    htole16(vlan_get_tag(m0));
   8274 			} else
    8275 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8276 
   8277 			dcmdlen = 0;
   8278 		} else {
   8279 			/* setup an advanced data descriptor */
   8280 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8281 			    htole64(dmamap->dm_segs[0].ds_addr);
   8282 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8283 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8284 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8285 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8286 			    htole32(fields);
   8287 			DPRINTF(WM_DEBUG_TX,
   8288 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8289 				device_xname(sc->sc_dev), nexttx,
   8290 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8291 			DPRINTF(WM_DEBUG_TX,
   8292 			    ("\t 0x%08x%08x\n", fields,
   8293 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8294 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8295 		}
   8296 
   8297 		lasttx = nexttx;
   8298 		nexttx = WM_NEXTTX(txq, nexttx);
   8299 		/*
    8300 		 * Fill in the remaining descriptors. The legacy and
    8301 		 * advanced formats are identical from here on.
   8302 		 */
   8303 		for (seg = 1; seg < dmamap->dm_nsegs;
   8304 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8305 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8306 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8307 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8308 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8309 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8310 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8311 			lasttx = nexttx;
   8312 
   8313 			DPRINTF(WM_DEBUG_TX,
   8314 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8315 				device_xname(sc->sc_dev), nexttx,
   8316 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8317 				dmamap->dm_segs[seg].ds_len));
   8318 		}
   8319 
   8320 		KASSERT(lasttx != -1);
   8321 
   8322 		/*
   8323 		 * Set up the command byte on the last descriptor of
   8324 		 * the packet. If we're in the interrupt delay window,
   8325 		 * delay the interrupt.
   8326 		 */
   8327 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8328 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8329 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8330 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8331 
   8332 		txs->txs_lastdesc = lasttx;
   8333 
   8334 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8335 		    device_xname(sc->sc_dev),
   8336 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8337 
   8338 		/* Sync the descriptors we're using. */
   8339 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8340 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8341 
   8342 		/* Give the packet to the chip. */
   8343 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8344 		sent = true;
   8345 
   8346 		DPRINTF(WM_DEBUG_TX,
   8347 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8348 
   8349 		DPRINTF(WM_DEBUG_TX,
   8350 		    ("%s: TX: finished transmitting packet, job %d\n",
   8351 			device_xname(sc->sc_dev), txq->txq_snext));
   8352 
   8353 		/* Advance the tx pointer. */
   8354 		txq->txq_free -= txs->txs_ndesc;
   8355 		txq->txq_next = nexttx;
   8356 
   8357 		txq->txq_sfree--;
   8358 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8359 
   8360 		/* Pass the packet to any BPF listeners. */
   8361 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8362 	}
   8363 
   8364 	if (m0 != NULL) {
   8365 		if (!is_transmit)
   8366 			ifp->if_flags |= IFF_OACTIVE;
   8367 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8368 		WM_Q_EVCNT_INCR(txq, descdrop);
   8369 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8370 			__func__));
   8371 		m_freem(m0);
   8372 	}
   8373 
   8374 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8375 		/* No more slots; notify upper layer. */
   8376 		if (!is_transmit)
   8377 			ifp->if_flags |= IFF_OACTIVE;
   8378 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8379 	}
   8380 
   8381 	if (sent) {
   8382 		/* Set a watchdog timer in case the chip flakes out. */
   8383 		txq->txq_lastsent = time_uptime;
   8384 		txq->txq_sending = true;
   8385 	}
   8386 }
   8387 
   8388 static void
   8389 wm_deferred_start_locked(struct wm_txqueue *txq)
   8390 {
   8391 	struct wm_softc *sc = txq->txq_sc;
   8392 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8393 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8394 	int qid = wmq->wmq_id;
   8395 
   8396 	KASSERT(mutex_owned(txq->txq_lock));
   8397 
    8398 	if (txq->txq_stopping) {
    8399 		/* The caller holds txq_lock and will release it. */
    8400 		return;
    8401 	}
   8402 
   8403 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8404 		/* XXX needed for ALTQ or single-CPU systems */
   8405 		if (qid == 0)
   8406 			wm_nq_start_locked(ifp);
   8407 		wm_nq_transmit_locked(ifp, txq);
   8408 	} else {
    8409 		/* XXX needed for ALTQ or single-CPU systems */
   8410 		if (qid == 0)
   8411 			wm_start_locked(ifp);
   8412 		wm_transmit_locked(ifp, txq);
   8413 	}
   8414 }
   8415 
   8416 /* Interrupt */
   8417 
   8418 /*
   8419  * wm_txeof:
   8420  *
   8421  *	Helper; handle transmit interrupts.
   8422  */
   8423 static bool
   8424 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8425 {
   8426 	struct wm_softc *sc = txq->txq_sc;
   8427 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8428 	struct wm_txsoft *txs;
   8429 	int count = 0;
   8430 	int i;
   8431 	uint8_t status;
   8432 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8433 	bool more = false;
   8434 
   8435 	KASSERT(mutex_owned(txq->txq_lock));
   8436 
   8437 	if (txq->txq_stopping)
   8438 		return false;
   8439 
   8440 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8441 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8442 	if (wmq->wmq_id == 0)
   8443 		ifp->if_flags &= ~IFF_OACTIVE;
   8444 
   8445 	/*
   8446 	 * Go through the Tx list and free mbufs for those
   8447 	 * frames which have been transmitted.
   8448 	 */
   8449 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8450 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8451 		if (limit-- == 0) {
   8452 			more = true;
   8453 			DPRINTF(WM_DEBUG_TX,
   8454 			    ("%s: TX: loop limited, job %d is not processed\n",
   8455 				device_xname(sc->sc_dev), i));
   8456 			break;
   8457 		}
   8458 
   8459 		txs = &txq->txq_soft[i];
   8460 
   8461 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8462 			device_xname(sc->sc_dev), i));
   8463 
   8464 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8465 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8466 
   8467 		status =
   8468 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8469 		if ((status & WTX_ST_DD) == 0) {
   8470 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8471 			    BUS_DMASYNC_PREREAD);
   8472 			break;
   8473 		}
   8474 
   8475 		count++;
   8476 		DPRINTF(WM_DEBUG_TX,
   8477 		    ("%s: TX: job %d done: descs %d..%d\n",
   8478 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8479 		    txs->txs_lastdesc));
   8480 
   8481 		/*
   8482 		 * XXX We should probably be using the statistics
   8483 		 * XXX registers, but I don't know if they exist
   8484 		 * XXX on chips before the i82544.
   8485 		 */
   8486 
   8487 #ifdef WM_EVENT_COUNTERS
   8488 		if (status & WTX_ST_TU)
   8489 			WM_Q_EVCNT_INCR(txq, underrun);
   8490 #endif /* WM_EVENT_COUNTERS */
   8491 
    8492 		/*
    8493 		 * The documentation for 82574 and newer says the status field has
    8494 		 * neither the EC (Excessive Collision) nor the LC (Late Collision)
    8495 		 * bit (both are reserved). Refer to the "PCIe GbE Controller Open
    8496 		 * Source Software Developer's Manual", the 82574 datasheet, and newer.
    8497 		 *
    8498 		 * XXX I saw the LC bit set on an I218 even though the media was
    8499 		 * full duplex, so the bit might have some other meaning ... (I
    8500 		 * have no documentation).
    8501 		 */
   8502 
   8503 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8504 		    && ((sc->sc_type < WM_T_82574)
   8505 			|| (sc->sc_type == WM_T_80003))) {
   8506 			ifp->if_oerrors++;
   8507 			if (status & WTX_ST_LC)
   8508 				log(LOG_WARNING, "%s: late collision\n",
   8509 				    device_xname(sc->sc_dev));
   8510 			else if (status & WTX_ST_EC) {
   8511 				ifp->if_collisions +=
   8512 				    TX_COLLISION_THRESHOLD + 1;
   8513 				log(LOG_WARNING, "%s: excessive collisions\n",
   8514 				    device_xname(sc->sc_dev));
   8515 			}
   8516 		} else
   8517 			ifp->if_opackets++;
   8518 
   8519 		txq->txq_packets++;
   8520 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8521 
   8522 		txq->txq_free += txs->txs_ndesc;
   8523 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8524 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8525 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8526 		m_freem(txs->txs_mbuf);
   8527 		txs->txs_mbuf = NULL;
   8528 	}
   8529 
   8530 	/* Update the dirty transmit buffer pointer. */
   8531 	txq->txq_sdirty = i;
   8532 	DPRINTF(WM_DEBUG_TX,
   8533 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8534 
   8535 	if (count != 0)
   8536 		rnd_add_uint32(&sc->rnd_source, count);
   8537 
   8538 	/*
   8539 	 * If there are no more pending transmissions, cancel the watchdog
   8540 	 * timer.
   8541 	 */
   8542 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8543 		txq->txq_sending = false;
   8544 
   8545 	return more;
   8546 }
   8547 
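         /*
          * The accessors below hide the three Rx descriptor layouts handled
          * by this driver: the extended descriptor used by the 82574, the
          * advanced (NEWQUEUE) descriptor used by 82575 and newer, and the
          * legacy descriptor used by everything else.
          */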
   8548 static inline uint32_t
   8549 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8550 {
   8551 	struct wm_softc *sc = rxq->rxq_sc;
   8552 
   8553 	if (sc->sc_type == WM_T_82574)
   8554 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8555 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8556 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8557 	else
   8558 		return rxq->rxq_descs[idx].wrx_status;
   8559 }
   8560 
   8561 static inline uint32_t
   8562 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8563 {
   8564 	struct wm_softc *sc = rxq->rxq_sc;
   8565 
   8566 	if (sc->sc_type == WM_T_82574)
   8567 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8568 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8569 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8570 	else
   8571 		return rxq->rxq_descs[idx].wrx_errors;
   8572 }
   8573 
   8574 static inline uint16_t
   8575 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8576 {
   8577 	struct wm_softc *sc = rxq->rxq_sc;
   8578 
   8579 	if (sc->sc_type == WM_T_82574)
   8580 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8581 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8582 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8583 	else
   8584 		return rxq->rxq_descs[idx].wrx_special;
   8585 }
   8586 
   8587 static inline int
   8588 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8589 {
   8590 	struct wm_softc *sc = rxq->rxq_sc;
   8591 
   8592 	if (sc->sc_type == WM_T_82574)
   8593 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8594 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8595 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8596 	else
   8597 		return rxq->rxq_descs[idx].wrx_len;
   8598 }
   8599 
   8600 #ifdef WM_DEBUG
   8601 static inline uint32_t
   8602 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8603 {
   8604 	struct wm_softc *sc = rxq->rxq_sc;
   8605 
   8606 	if (sc->sc_type == WM_T_82574)
   8607 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8608 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8609 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8610 	else
   8611 		return 0;
   8612 }
   8613 
   8614 static inline uint8_t
   8615 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8616 {
   8617 	struct wm_softc *sc = rxq->rxq_sc;
   8618 
   8619 	if (sc->sc_type == WM_T_82574)
   8620 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8621 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8622 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8623 	else
   8624 		return 0;
   8625 }
   8626 #endif /* WM_DEBUG */
   8627 
   8628 static inline bool
   8629 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8630     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8631 {
   8632 
   8633 	if (sc->sc_type == WM_T_82574)
   8634 		return (status & ext_bit) != 0;
   8635 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8636 		return (status & nq_bit) != 0;
   8637 	else
   8638 		return (status & legacy_bit) != 0;
   8639 }
   8640 
   8641 static inline bool
   8642 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8643     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8644 {
   8645 
   8646 	if (sc->sc_type == WM_T_82574)
   8647 		return (error & ext_bit) != 0;
   8648 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8649 		return (error & nq_bit) != 0;
   8650 	else
   8651 		return (error & legacy_bit) != 0;
   8652 }
   8653 
   8654 static inline bool
   8655 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8656 {
   8657 
   8658 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8659 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8660 		return true;
   8661 	else
   8662 		return false;
   8663 }
   8664 
   8665 static inline bool
   8666 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8667 {
   8668 	struct wm_softc *sc = rxq->rxq_sc;
   8669 
   8670 	/* XXXX missing error bit for newqueue? */
   8671 	if (wm_rxdesc_is_set_error(sc, errors,
   8672 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8673 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8674 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8675 		NQRXC_ERROR_RXE)) {
   8676 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8677 		    EXTRXC_ERROR_SE, 0))
   8678 			log(LOG_WARNING, "%s: symbol error\n",
   8679 			    device_xname(sc->sc_dev));
   8680 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8681 		    EXTRXC_ERROR_SEQ, 0))
   8682 			log(LOG_WARNING, "%s: receive sequence error\n",
   8683 			    device_xname(sc->sc_dev));
   8684 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8685 		    EXTRXC_ERROR_CE, 0))
   8686 			log(LOG_WARNING, "%s: CRC error\n",
   8687 			    device_xname(sc->sc_dev));
   8688 		return true;
   8689 	}
   8690 
   8691 	return false;
   8692 }
   8693 
   8694 static inline bool
   8695 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8696 {
   8697 	struct wm_softc *sc = rxq->rxq_sc;
   8698 
   8699 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8700 		NQRXC_STATUS_DD)) {
   8701 		/* We have processed all of the receive descriptors. */
   8702 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8703 		return false;
   8704 	}
   8705 
   8706 	return true;
   8707 }
   8708 
   8709 static inline bool
   8710 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8711     uint16_t vlantag, struct mbuf *m)
   8712 {
   8713 
   8714 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8715 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8716 		vlan_set_tag(m, le16toh(vlantag));
   8717 	}
   8718 
   8719 	return true;
   8720 }
   8721 
   8722 static inline void
   8723 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8724     uint32_t errors, struct mbuf *m)
   8725 {
   8726 	struct wm_softc *sc = rxq->rxq_sc;
   8727 
   8728 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8729 		if (wm_rxdesc_is_set_status(sc, status,
   8730 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8731 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8732 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8733 			if (wm_rxdesc_is_set_error(sc, errors,
   8734 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8735 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8736 		}
   8737 		if (wm_rxdesc_is_set_status(sc, status,
   8738 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8739 			/*
   8740 			 * Note: we don't know if this was TCP or UDP,
   8741 			 * so we just set both bits, and expect the
   8742 			 * upper layers to deal.
   8743 			 */
   8744 			WM_Q_EVCNT_INCR(rxq, tusum);
   8745 			m->m_pkthdr.csum_flags |=
   8746 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8747 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8748 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8749 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8750 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8751 		}
   8752 	}
   8753 }
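         /*
          * Example of the result: when the IXSM "ignore checksum" bit is
          * clear, a good TCP/IPv4 frame with IPCS and TCPCS reported by the
          * hardware leaves the mbuf with M_CSUM_IPv4 plus all four of
          * M_CSUM_TCPv4/UDPv4/TCPv6/UDPv6 (the descriptor does not say
          * which L4 protocol was checked); a bad L4 checksum additionally
          * sets M_CSUM_TCP_UDP_BAD.
          */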
   8754 
   8755 /*
   8756  * wm_rxeof:
   8757  *
   8758  *	Helper; handle receive interrupts.
   8759  */
   8760 static bool
   8761 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8762 {
   8763 	struct wm_softc *sc = rxq->rxq_sc;
   8764 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8765 	struct wm_rxsoft *rxs;
   8766 	struct mbuf *m;
   8767 	int i, len;
   8768 	int count = 0;
   8769 	uint32_t status, errors;
   8770 	uint16_t vlantag;
   8771 	bool more = false;
   8772 
   8773 	KASSERT(mutex_owned(rxq->rxq_lock));
   8774 
   8775 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8776 		if (limit-- == 0) {
   8777 			rxq->rxq_ptr = i;
   8778 			more = true;
   8779 			DPRINTF(WM_DEBUG_RX,
   8780 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8781 				device_xname(sc->sc_dev), i));
   8782 			break;
   8783 		}
   8784 
   8785 		rxs = &rxq->rxq_soft[i];
   8786 
   8787 		DPRINTF(WM_DEBUG_RX,
   8788 		    ("%s: RX: checking descriptor %d\n",
   8789 			device_xname(sc->sc_dev), i));
   8790 		wm_cdrxsync(rxq, i,
   8791 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8792 
   8793 		status = wm_rxdesc_get_status(rxq, i);
   8794 		errors = wm_rxdesc_get_errors(rxq, i);
   8795 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8796 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8797 #ifdef WM_DEBUG
   8798 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8799 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8800 #endif
   8801 
   8802 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8803 			/*
    8804 			 * Update the receive pointer while holding
    8805 			 * rxq_lock, consistent with the updated counters.
    8806 			 */
   8807 			rxq->rxq_ptr = i;
   8808 			break;
   8809 		}
   8810 
   8811 		count++;
   8812 		if (__predict_false(rxq->rxq_discard)) {
   8813 			DPRINTF(WM_DEBUG_RX,
   8814 			    ("%s: RX: discarding contents of descriptor %d\n",
   8815 				device_xname(sc->sc_dev), i));
   8816 			wm_init_rxdesc(rxq, i);
   8817 			if (wm_rxdesc_is_eop(rxq, status)) {
   8818 				/* Reset our state. */
   8819 				DPRINTF(WM_DEBUG_RX,
   8820 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8821 					device_xname(sc->sc_dev)));
   8822 				rxq->rxq_discard = 0;
   8823 			}
   8824 			continue;
   8825 		}
   8826 
   8827 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8828 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8829 
   8830 		m = rxs->rxs_mbuf;
   8831 
   8832 		/*
   8833 		 * Add a new receive buffer to the ring, unless of
   8834 		 * course the length is zero. Treat the latter as a
   8835 		 * failed mapping.
   8836 		 */
   8837 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8838 			/*
   8839 			 * Failed, throw away what we've done so
   8840 			 * far, and discard the rest of the packet.
   8841 			 */
   8842 			ifp->if_ierrors++;
   8843 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8844 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8845 			wm_init_rxdesc(rxq, i);
   8846 			if (!wm_rxdesc_is_eop(rxq, status))
   8847 				rxq->rxq_discard = 1;
   8848 			if (rxq->rxq_head != NULL)
   8849 				m_freem(rxq->rxq_head);
   8850 			WM_RXCHAIN_RESET(rxq);
   8851 			DPRINTF(WM_DEBUG_RX,
   8852 			    ("%s: RX: Rx buffer allocation failed, "
   8853 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8854 				rxq->rxq_discard ? " (discard)" : ""));
   8855 			continue;
   8856 		}
   8857 
   8858 		m->m_len = len;
   8859 		rxq->rxq_len += len;
   8860 		DPRINTF(WM_DEBUG_RX,
   8861 		    ("%s: RX: buffer at %p len %d\n",
   8862 			device_xname(sc->sc_dev), m->m_data, len));
   8863 
   8864 		/* If this is not the end of the packet, keep looking. */
   8865 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8866 			WM_RXCHAIN_LINK(rxq, m);
   8867 			DPRINTF(WM_DEBUG_RX,
   8868 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8869 				device_xname(sc->sc_dev), rxq->rxq_len));
   8870 			continue;
   8871 		}
   8872 
    8873 		/*
    8874 		 * Okay, we have the entire packet now. The chip is
    8875 		 * configured to include the FCS except on I350, I354 and
    8876 		 * I21[01] (not all chips can be configured to strip it),
    8877 		 * so we need to trim it. We may also need to adjust the
    8878 		 * length of the previous mbuf in the chain if the current
    8879 		 * mbuf is too short.
    8880 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8881 		 * register is always set on I350, so we don't trim there.
    8882 		 */
   8883 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8884 		    && (sc->sc_type != WM_T_I210)
   8885 		    && (sc->sc_type != WM_T_I211)) {
   8886 			if (m->m_len < ETHER_CRC_LEN) {
   8887 				rxq->rxq_tail->m_len
   8888 				    -= (ETHER_CRC_LEN - m->m_len);
   8889 				m->m_len = 0;
   8890 			} else
   8891 				m->m_len -= ETHER_CRC_LEN;
   8892 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8893 		} else
   8894 			len = rxq->rxq_len;
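         		/*
         		 * Worked example: if the chain's final mbuf holds only
         		 * 2 of the 4 FCS bytes, the previous mbuf is shortened
         		 * by the remaining 2 bytes and the final mbuf is
         		 * emptied, so exactly ETHER_CRC_LEN bytes are trimmed.
         		 */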
   8895 
   8896 		WM_RXCHAIN_LINK(rxq, m);
   8897 
   8898 		*rxq->rxq_tailp = NULL;
   8899 		m = rxq->rxq_head;
   8900 
   8901 		WM_RXCHAIN_RESET(rxq);
   8902 
   8903 		DPRINTF(WM_DEBUG_RX,
   8904 		    ("%s: RX: have entire packet, len -> %d\n",
   8905 			device_xname(sc->sc_dev), len));
   8906 
   8907 		/* If an error occurred, update stats and drop the packet. */
   8908 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8909 			m_freem(m);
   8910 			continue;
   8911 		}
   8912 
   8913 		/* No errors.  Receive the packet. */
   8914 		m_set_rcvif(m, ifp);
   8915 		m->m_pkthdr.len = len;
    8916 		/*
    8917 		 * TODO
    8918 		 * We should save the rsshash and rsstype in this mbuf.
    8919 		 */
   8920 		DPRINTF(WM_DEBUG_RX,
   8921 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8922 			device_xname(sc->sc_dev), rsstype, rsshash));
   8923 
   8924 		/*
   8925 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8926 		 * for us.  Associate the tag with the packet.
   8927 		 */
   8928 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8929 			continue;
   8930 
   8931 		/* Set up checksum info for this packet. */
   8932 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8933 		/*
    8934 		 * Update the receive pointer while holding rxq_lock,
    8935 		 * consistent with the updated counters.
    8936 		 */
   8937 		rxq->rxq_ptr = i;
   8938 		rxq->rxq_packets++;
   8939 		rxq->rxq_bytes += len;
   8940 		mutex_exit(rxq->rxq_lock);
   8941 
   8942 		/* Pass it on. */
   8943 		if_percpuq_enqueue(sc->sc_ipq, m);
   8944 
   8945 		mutex_enter(rxq->rxq_lock);
   8946 
   8947 		if (rxq->rxq_stopping)
   8948 			break;
   8949 	}
   8950 
   8951 	if (count != 0)
   8952 		rnd_add_uint32(&sc->rnd_source, count);
   8953 
   8954 	DPRINTF(WM_DEBUG_RX,
   8955 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8956 
   8957 	return more;
   8958 }
   8959 
   8960 /*
   8961  * wm_linkintr_gmii:
   8962  *
   8963  *	Helper; handle link interrupts for GMII.
   8964  */
   8965 static void
   8966 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8967 {
   8968 	device_t dev = sc->sc_dev;
   8969 	uint32_t status, reg;
   8970 	bool link;
   8971 	int rv;
   8972 
   8973 	KASSERT(WM_CORE_LOCKED(sc));
   8974 
   8975 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8976 		__func__));
   8977 
   8978 	if ((icr & ICR_LSC) == 0) {
   8979 		if (icr & ICR_RXSEQ)
   8980 			DPRINTF(WM_DEBUG_LINK,
   8981 			    ("%s: LINK Receive sequence error\n",
   8982 				device_xname(dev)));
   8983 		return;
   8984 	}
   8985 
   8986 	/* Link status changed */
   8987 	status = CSR_READ(sc, WMREG_STATUS);
   8988 	link = status & STATUS_LU;
   8989 	if (link)
   8990 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8991 			device_xname(dev),
   8992 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8993 	else
   8994 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8995 			device_xname(dev)));
   8996 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8997 		wm_gig_downshift_workaround_ich8lan(sc);
   8998 
   8999 	if ((sc->sc_type == WM_T_ICH8)
   9000 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9001 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9002 	}
   9003 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9004 		device_xname(dev)));
   9005 	mii_pollstat(&sc->sc_mii);
   9006 	if (sc->sc_type == WM_T_82543) {
   9007 		int miistatus, active;
   9008 
   9009 		/*
    9010 		 * With the 82543, we need to force the MAC's
    9011 		 * speed and duplex to match the PHY's current
    9012 		 * speed and duplex configuration.
   9013 		 */
   9014 		miistatus = sc->sc_mii.mii_media_status;
   9015 
   9016 		if (miistatus & IFM_ACTIVE) {
   9017 			active = sc->sc_mii.mii_media_active;
   9018 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9019 			switch (IFM_SUBTYPE(active)) {
   9020 			case IFM_10_T:
   9021 				sc->sc_ctrl |= CTRL_SPEED_10;
   9022 				break;
   9023 			case IFM_100_TX:
   9024 				sc->sc_ctrl |= CTRL_SPEED_100;
   9025 				break;
   9026 			case IFM_1000_T:
   9027 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9028 				break;
   9029 			default:
   9030 				/*
    9031 				 * Fiber?
    9032 				 * Should not enter here.
   9033 				 */
   9034 				printf("unknown media (%x)\n", active);
   9035 				break;
   9036 			}
   9037 			if (active & IFM_FDX)
   9038 				sc->sc_ctrl |= CTRL_FD;
   9039 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9040 		}
   9041 	} else if (sc->sc_type == WM_T_PCH) {
   9042 		wm_k1_gig_workaround_hv(sc,
   9043 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9044 	}
   9045 
   9046 	/*
   9047 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9048 	 * aggressive resulting in many collisions. To avoid this, increase
   9049 	 * the IPG and reduce Rx latency in the PHY.
   9050 	 */
   9051 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9052 	    && link) {
   9053 		uint32_t tipg_reg;
   9054 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9055 		bool fdx;
   9056 		uint16_t emi_addr, emi_val;
   9057 
   9058 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9059 		tipg_reg &= ~TIPG_IPGT_MASK;
   9060 		fdx = status & STATUS_FD;
   9061 
   9062 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9063 			tipg_reg |= 0xff;
   9064 			/* Reduce Rx latency in analog PHY */
   9065 			emi_val = 0;
   9066 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9067 		    fdx && speed != STATUS_SPEED_1000) {
   9068 			tipg_reg |= 0xc;
   9069 			emi_val = 1;
   9070 		} else {
   9071 			/* Roll back the default values */
   9072 			tipg_reg |= 0x08;
   9073 			emi_val = 1;
   9074 		}
   9075 
   9076 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9077 
   9078 		rv = sc->phy.acquire(sc);
   9079 		if (rv)
   9080 			return;
   9081 
   9082 		if (sc->sc_type == WM_T_PCH2)
   9083 			emi_addr = I82579_RX_CONFIG;
   9084 		else
   9085 			emi_addr = I217_RX_CONFIG;
   9086 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9087 
   9088 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9089 			uint16_t phy_reg;
   9090 
   9091 			sc->phy.readreg_locked(dev, 2,
   9092 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9093 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9094 			if (speed == STATUS_SPEED_100
   9095 			    || speed == STATUS_SPEED_10)
   9096 				phy_reg |= 0x3e8;
   9097 			else
   9098 				phy_reg |= 0xfa;
   9099 			sc->phy.writereg_locked(dev, 2,
   9100 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9101 
   9102 			if (speed == STATUS_SPEED_1000) {
   9103 				sc->phy.readreg_locked(dev, 2,
   9104 				    HV_PM_CTRL, &phy_reg);
   9105 
   9106 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9107 
   9108 				sc->phy.writereg_locked(dev, 2,
   9109 				    HV_PM_CTRL, phy_reg);
   9110 			}
   9111 		}
   9112 		sc->phy.release(sc);
   9113 
   9114 		if (rv)
   9115 			return;
   9116 
   9117 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9118 			uint16_t data, ptr_gap;
   9119 
   9120 			if (speed == STATUS_SPEED_1000) {
   9121 				rv = sc->phy.acquire(sc);
   9122 				if (rv)
   9123 					return;
   9124 
   9125 				rv = sc->phy.readreg_locked(dev, 2,
   9126 				    I219_UNKNOWN1, &data);
   9127 				if (rv) {
   9128 					sc->phy.release(sc);
   9129 					return;
   9130 				}
   9131 
   9132 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9133 				if (ptr_gap < 0x18) {
   9134 					data &= ~(0x3ff << 2);
   9135 					data |= (0x18 << 2);
   9136 					rv = sc->phy.writereg_locked(dev,
   9137 					    2, I219_UNKNOWN1, data);
   9138 				}
   9139 				sc->phy.release(sc);
   9140 				if (rv)
   9141 					return;
   9142 			} else {
   9143 				rv = sc->phy.acquire(sc);
   9144 				if (rv)
   9145 					return;
   9146 
   9147 				rv = sc->phy.writereg_locked(dev, 2,
   9148 				    I219_UNKNOWN1, 0xc023);
   9149 				sc->phy.release(sc);
   9150 				if (rv)
   9151 					return;
   9152 
   9153 			}
   9154 		}
   9155 	}
   9156 
   9157 	/*
   9158 	 * I217 Packet Loss issue:
   9159 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9160 	 * on power up.
   9161 	 * Set the Beacon Duration for I217 to 8 usec
   9162 	 */
   9163 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9164 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9165 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9166 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9167 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9168 	}
   9169 
   9170 	/* Work-around I218 hang issue */
   9171 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9172 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9173 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9174 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9175 		wm_k1_workaround_lpt_lp(sc, link);
   9176 
   9177 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9178 		/*
   9179 		 * Set platform power management values for Latency
   9180 		 * Tolerance Reporting (LTR)
   9181 		 */
   9182 		wm_platform_pm_pch_lpt(sc,
   9183 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9184 	}
   9185 
   9186 	/* Clear link partner's EEE ability */
   9187 	sc->eee_lp_ability = 0;
   9188 
   9189 	/* FEXTNVM6 K1-off workaround */
   9190 	if (sc->sc_type == WM_T_PCH_SPT) {
   9191 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9192 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9193 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9194 		else
   9195 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9196 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9197 	}
   9198 
   9199 	if (!link)
   9200 		return;
   9201 
   9202 	switch (sc->sc_type) {
   9203 	case WM_T_PCH2:
   9204 		wm_k1_workaround_lv(sc);
   9205 		/* FALLTHROUGH */
   9206 	case WM_T_PCH:
   9207 		if (sc->sc_phytype == WMPHY_82578)
   9208 			wm_link_stall_workaround_hv(sc);
   9209 		break;
   9210 	default:
   9211 		break;
   9212 	}
   9213 
   9214 	/* Enable/Disable EEE after link up */
   9215 	if (sc->sc_phytype > WMPHY_82579)
   9216 		wm_set_eee_pchlan(sc);
   9217 }
   9218 
   9219 /*
   9220  * wm_linkintr_tbi:
   9221  *
   9222  *	Helper; handle link interrupts for TBI mode.
   9223  */
   9224 static void
   9225 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9226 {
   9227 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9228 	uint32_t status;
   9229 
   9230 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9231 		__func__));
   9232 
   9233 	status = CSR_READ(sc, WMREG_STATUS);
   9234 	if (icr & ICR_LSC) {
   9235 		wm_check_for_link(sc);
   9236 		if (status & STATUS_LU) {
   9237 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9238 				device_xname(sc->sc_dev),
   9239 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9240 			/*
   9241 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9242 			 * so we should update sc->sc_ctrl
   9243 			 */
   9244 
   9245 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9246 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9247 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9248 			if (status & STATUS_FD)
   9249 				sc->sc_tctl |=
   9250 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9251 			else
   9252 				sc->sc_tctl |=
   9253 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9254 			if (sc->sc_ctrl & CTRL_TFCE)
   9255 				sc->sc_fcrtl |= FCRTL_XONE;
   9256 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9257 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9258 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9259 			sc->sc_tbi_linkup = 1;
   9260 			if_link_state_change(ifp, LINK_STATE_UP);
   9261 		} else {
   9262 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9263 				device_xname(sc->sc_dev)));
   9264 			sc->sc_tbi_linkup = 0;
   9265 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9266 		}
   9267 		/* Update LED */
   9268 		wm_tbi_serdes_set_linkled(sc);
   9269 	} else if (icr & ICR_RXSEQ)
   9270 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9271 			device_xname(sc->sc_dev)));
   9272 }
   9273 
   9274 /*
   9275  * wm_linkintr_serdes:
   9276  *
    9277  *	Helper; handle link interrupts for SERDES mode.
   9278  */
   9279 static void
   9280 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9281 {
   9282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9283 	struct mii_data *mii = &sc->sc_mii;
   9284 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9285 	uint32_t pcs_adv, pcs_lpab, reg;
   9286 
   9287 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9288 		__func__));
   9289 
   9290 	if (icr & ICR_LSC) {
   9291 		/* Check PCS */
   9292 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9293 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9294 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9295 				device_xname(sc->sc_dev)));
   9296 			mii->mii_media_status |= IFM_ACTIVE;
   9297 			sc->sc_tbi_linkup = 1;
   9298 			if_link_state_change(ifp, LINK_STATE_UP);
   9299 		} else {
   9300 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9301 				device_xname(sc->sc_dev)));
   9302 			mii->mii_media_status |= IFM_NONE;
   9303 			sc->sc_tbi_linkup = 0;
   9304 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9305 			wm_tbi_serdes_set_linkled(sc);
   9306 			return;
   9307 		}
   9308 		mii->mii_media_active |= IFM_1000_SX;
   9309 		if ((reg & PCS_LSTS_FDX) != 0)
   9310 			mii->mii_media_active |= IFM_FDX;
   9311 		else
   9312 			mii->mii_media_active |= IFM_HDX;
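         		/*
         		 * The flow-control resolution below follows the usual
         		 * autonegotiation pause rules: symmetric pause
         		 * advertised by both sides enables pause in both
         		 * directions, while the asymmetric combinations enable
         		 * TX-only or RX-only pause.
         		 */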
   9313 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9314 			/* Check flow */
   9315 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9316 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9317 				DPRINTF(WM_DEBUG_LINK,
   9318 				    ("XXX LINKOK but not ACOMP\n"));
   9319 				return;
   9320 			}
   9321 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9322 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9323 			DPRINTF(WM_DEBUG_LINK,
   9324 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9325 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9326 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9327 				mii->mii_media_active |= IFM_FLOW
   9328 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9329 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9330 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9331 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9332 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9333 				mii->mii_media_active |= IFM_FLOW
   9334 				    | IFM_ETH_TXPAUSE;
   9335 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9336 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9337 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9338 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9339 				mii->mii_media_active |= IFM_FLOW
   9340 				    | IFM_ETH_RXPAUSE;
   9341 		}
   9342 		/* Update LED */
   9343 		wm_tbi_serdes_set_linkled(sc);
   9344 	} else
   9345 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9346 		    device_xname(sc->sc_dev)));
   9347 }
   9348 
   9349 /*
   9350  * wm_linkintr:
   9351  *
   9352  *	Helper; handle link interrupts.
   9353  */
   9354 static void
   9355 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9356 {
   9357 
   9358 	KASSERT(WM_CORE_LOCKED(sc));
   9359 
   9360 	if (sc->sc_flags & WM_F_HAS_MII)
   9361 		wm_linkintr_gmii(sc, icr);
   9362 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9363 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9364 		wm_linkintr_serdes(sc, icr);
   9365 	else
   9366 		wm_linkintr_tbi(sc, icr);
   9367 }
   9368 
   9369 /*
   9370  * wm_intr_legacy:
   9371  *
   9372  *	Interrupt service routine for INTx and MSI.
   9373  */
   9374 static int
   9375 wm_intr_legacy(void *arg)
   9376 {
   9377 	struct wm_softc *sc = arg;
   9378 	struct wm_queue *wmq = &sc->sc_queue[0];
   9379 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9380 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9381 	uint32_t icr, rndval = 0;
   9382 	int handled = 0;
   9383 
   9384 	while (1 /* CONSTCOND */) {
   9385 		icr = CSR_READ(sc, WMREG_ICR);
   9386 		if ((icr & sc->sc_icr) == 0)
   9387 			break;
   9388 		if (handled == 0)
   9389 			DPRINTF(WM_DEBUG_TX,
    9390 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9391 		if (rndval == 0)
   9392 			rndval = icr;
   9393 
   9394 		mutex_enter(rxq->rxq_lock);
   9395 
   9396 		if (rxq->rxq_stopping) {
   9397 			mutex_exit(rxq->rxq_lock);
   9398 			break;
   9399 		}
   9400 
   9401 		handled = 1;
   9402 
   9403 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9404 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9405 			DPRINTF(WM_DEBUG_RX,
   9406 			    ("%s: RX: got Rx intr 0x%08x\n",
   9407 				device_xname(sc->sc_dev),
   9408 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9409 			WM_Q_EVCNT_INCR(rxq, intr);
   9410 		}
   9411 #endif
   9412 		/*
   9413 		 * wm_rxeof() does *not* call upper layer functions directly,
    9414 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9415 		 * So, we can call wm_rxeof() in interrupt context.
   9416 		 */
   9417 		wm_rxeof(rxq, UINT_MAX);
   9418 
   9419 		mutex_exit(rxq->rxq_lock);
   9420 		mutex_enter(txq->txq_lock);
   9421 
   9422 		if (txq->txq_stopping) {
   9423 			mutex_exit(txq->txq_lock);
   9424 			break;
   9425 		}
   9426 
   9427 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9428 		if (icr & ICR_TXDW) {
   9429 			DPRINTF(WM_DEBUG_TX,
   9430 			    ("%s: TX: got TXDW interrupt\n",
   9431 				device_xname(sc->sc_dev)));
   9432 			WM_Q_EVCNT_INCR(txq, txdw);
   9433 		}
   9434 #endif
   9435 		wm_txeof(txq, UINT_MAX);
   9436 
   9437 		mutex_exit(txq->txq_lock);
   9438 		WM_CORE_LOCK(sc);
   9439 
   9440 		if (sc->sc_core_stopping) {
   9441 			WM_CORE_UNLOCK(sc);
   9442 			break;
   9443 		}
   9444 
   9445 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9446 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9447 			wm_linkintr(sc, icr);
   9448 		}
   9449 
   9450 		WM_CORE_UNLOCK(sc);
   9451 
   9452 		if (icr & ICR_RXO) {
   9453 #if defined(WM_DEBUG)
   9454 			log(LOG_WARNING, "%s: Receive overrun\n",
   9455 			    device_xname(sc->sc_dev));
   9456 #endif /* defined(WM_DEBUG) */
   9457 		}
   9458 	}
   9459 
   9460 	rnd_add_uint32(&sc->rnd_source, rndval);
   9461 
   9462 	if (handled) {
   9463 		/* Try to get more packets going. */
   9464 		softint_schedule(wmq->wmq_si);
   9465 	}
   9466 
   9467 	return handled;
   9468 }
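         /*
          * Note: on these controllers reading ICR acknowledges the pending
          * interrupt causes, so the loop above exits once no enabled cause
          * remains; any remaining work is pushed to the wmq_si softint.
          */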
   9469 
   9470 static inline void
   9471 wm_txrxintr_disable(struct wm_queue *wmq)
   9472 {
   9473 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9474 
   9475 	if (sc->sc_type == WM_T_82574)
   9476 		CSR_WRITE(sc, WMREG_IMC,
   9477 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9478 	else if (sc->sc_type == WM_T_82575)
   9479 		CSR_WRITE(sc, WMREG_EIMC,
   9480 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9481 	else
   9482 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9483 }
   9484 
   9485 static inline void
   9486 wm_txrxintr_enable(struct wm_queue *wmq)
   9487 {
   9488 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9489 
   9490 	wm_itrs_calculate(sc, wmq);
   9491 
    9492 	/*
    9493 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9494 	 * here. It does not matter whether RXQ(0) or RXQ(1) enables
    9495 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9496 	 * while its wm_handle_queue(wmq) is running.
    9497 	 */
   9498 	if (sc->sc_type == WM_T_82574)
   9499 		CSR_WRITE(sc, WMREG_IMS,
   9500 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9501 	else if (sc->sc_type == WM_T_82575)
   9502 		CSR_WRITE(sc, WMREG_EIMS,
   9503 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9504 	else
   9505 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9506 }
   9507 
   9508 static int
   9509 wm_txrxintr_msix(void *arg)
   9510 {
   9511 	struct wm_queue *wmq = arg;
   9512 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9513 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9514 	struct wm_softc *sc = txq->txq_sc;
   9515 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9516 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9517 	bool txmore;
   9518 	bool rxmore;
   9519 
   9520 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9521 
   9522 	DPRINTF(WM_DEBUG_TX,
   9523 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9524 
   9525 	wm_txrxintr_disable(wmq);
   9526 
   9527 	mutex_enter(txq->txq_lock);
   9528 
   9529 	if (txq->txq_stopping) {
   9530 		mutex_exit(txq->txq_lock);
   9531 		return 0;
   9532 	}
   9533 
   9534 	WM_Q_EVCNT_INCR(txq, txdw);
   9535 	txmore = wm_txeof(txq, txlimit);
    9536 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9537 	mutex_exit(txq->txq_lock);
   9538 
   9539 	DPRINTF(WM_DEBUG_RX,
   9540 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9541 	mutex_enter(rxq->rxq_lock);
   9542 
   9543 	if (rxq->rxq_stopping) {
   9544 		mutex_exit(rxq->rxq_lock);
   9545 		return 0;
   9546 	}
   9547 
   9548 	WM_Q_EVCNT_INCR(rxq, intr);
   9549 	rxmore = wm_rxeof(rxq, rxlimit);
   9550 	mutex_exit(rxq->rxq_lock);
   9551 
   9552 	wm_itrs_writereg(sc, wmq);
   9553 
   9554 	if (txmore || rxmore)
   9555 		softint_schedule(wmq->wmq_si);
   9556 	else
   9557 		wm_txrxintr_enable(wmq);
   9558 
   9559 	return 1;
   9560 }
   9561 
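/*
 * wm_handle_queue:
 *
 *	Softint handler to continue the Tx/Rx processing that exceeded
 *	the per-interrupt process limits.
 */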
   9562 static void
   9563 wm_handle_queue(void *arg)
   9564 {
   9565 	struct wm_queue *wmq = arg;
   9566 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9567 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9568 	struct wm_softc *sc = txq->txq_sc;
   9569 	u_int txlimit = sc->sc_tx_process_limit;
   9570 	u_int rxlimit = sc->sc_rx_process_limit;
   9571 	bool txmore;
   9572 	bool rxmore;
   9573 
   9574 	mutex_enter(txq->txq_lock);
   9575 	if (txq->txq_stopping) {
   9576 		mutex_exit(txq->txq_lock);
   9577 		return;
   9578 	}
   9579 	txmore = wm_txeof(txq, txlimit);
   9580 	wm_deferred_start_locked(txq);
   9581 	mutex_exit(txq->txq_lock);
   9582 
   9583 	mutex_enter(rxq->rxq_lock);
   9584 	if (rxq->rxq_stopping) {
   9585 		mutex_exit(rxq->rxq_lock);
   9586 		return;
   9587 	}
   9588 	WM_Q_EVCNT_INCR(rxq, defer);
   9589 	rxmore = wm_rxeof(rxq, rxlimit);
   9590 	mutex_exit(rxq->rxq_lock);
   9591 
   9592 	if (txmore || rxmore)
   9593 		softint_schedule(wmq->wmq_si);
   9594 	else
   9595 		wm_txrxintr_enable(wmq);
   9596 }
   9597 
   9598 /*
   9599  * wm_linkintr_msix:
   9600  *
   9601  *	Interrupt service routine for link status change for MSI-X.
   9602  */
   9603 static int
   9604 wm_linkintr_msix(void *arg)
   9605 {
   9606 	struct wm_softc *sc = arg;
   9607 	uint32_t reg;
    9608 	bool has_rxo = false;
   9609 
   9610 	DPRINTF(WM_DEBUG_LINK,
   9611 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9612 
   9613 	reg = CSR_READ(sc, WMREG_ICR);
   9614 	WM_CORE_LOCK(sc);
   9615 	if (sc->sc_core_stopping)
   9616 		goto out;
   9617 
   9618 	if ((reg & ICR_LSC) != 0) {
   9619 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9620 		wm_linkintr(sc, ICR_LSC);
   9621 	}
   9622 
   9623 	/*
   9624 	 * XXX 82574 MSI-X mode workaround
   9625 	 *
    9626 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is raised
    9627 	 * on the ICR_OTHER MSI-X vector, and neither the ICR_RXQ(0) nor the
    9628 	 * ICR_RXQ(1) vector fires. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9629 	 * interrupts by writing WMREG_ICS to process received packets.
   9630 	 */
   9631 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9632 #if defined(WM_DEBUG)
   9633 		log(LOG_WARNING, "%s: Receive overrun\n",
   9634 		    device_xname(sc->sc_dev));
   9635 #endif /* defined(WM_DEBUG) */
   9636 
   9637 		has_rxo = true;
   9638 		/*
    9639 		 * The RXO interrupt fires at a very high rate when receive
    9640 		 * traffic is heavy, so use polling mode for ICR_OTHER, as is
    9641 		 * done for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
    9642 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    9643 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9644 		 */
   9645 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9646 
   9647 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9648 	}
    9649 
   9652 out:
   9653 	WM_CORE_UNLOCK(sc);
   9654 
   9655 	if (sc->sc_type == WM_T_82574) {
   9656 		if (!has_rxo)
   9657 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9658 		else
   9659 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9660 	} else if (sc->sc_type == WM_T_82575)
   9661 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9662 	else
   9663 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9664 
   9665 	return 1;
   9666 }
   9667 
   9668 /*
   9669  * Media related.
   9670  * GMII, SGMII, TBI (and SERDES)
   9671  */
   9672 
   9673 /* Common */
   9674 
   9675 /*
   9676  * wm_tbi_serdes_set_linkled:
   9677  *
   9678  *	Update the link LED on TBI and SERDES devices.
   9679  */
   9680 static void
   9681 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9682 {
   9683 
   9684 	if (sc->sc_tbi_linkup)
   9685 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9686 	else
   9687 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9688 
   9689 	/* 82540 or newer devices are active low */
   9690 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9691 
   9692 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9693 }
   9694 
   9695 /* GMII related */
   9696 
   9697 /*
   9698  * wm_gmii_reset:
   9699  *
   9700  *	Reset the PHY.
   9701  */
   9702 static void
   9703 wm_gmii_reset(struct wm_softc *sc)
   9704 {
   9705 	uint32_t reg;
   9706 	int rv;
   9707 
   9708 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9709 		device_xname(sc->sc_dev), __func__));
   9710 
   9711 	rv = sc->phy.acquire(sc);
   9712 	if (rv != 0) {
   9713 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9714 		    __func__);
   9715 		return;
   9716 	}
   9717 
   9718 	switch (sc->sc_type) {
   9719 	case WM_T_82542_2_0:
   9720 	case WM_T_82542_2_1:
   9721 		/* null */
   9722 		break;
   9723 	case WM_T_82543:
   9724 		/*
   9725 		 * With 82543, we need to force speed and duplex on the MAC
   9726 		 * equal to what the PHY speed and duplex configuration is.
   9727 		 * In addition, we need to perform a hardware reset on the PHY
   9728 		 * to take it out of reset.
   9729 		 */
   9730 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9731 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9732 
   9733 		/* The PHY reset pin is active-low. */
   9734 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9735 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9736 		    CTRL_EXT_SWDPIN(4));
   9737 		reg |= CTRL_EXT_SWDPIO(4);
   9738 
   9739 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9740 		CSR_WRITE_FLUSH(sc);
   9741 		delay(10*1000);
   9742 
   9743 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9744 		CSR_WRITE_FLUSH(sc);
   9745 		delay(150);
   9746 #if 0
   9747 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9748 #endif
   9749 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9750 		break;
   9751 	case WM_T_82544:	/* reset 10000us */
   9752 	case WM_T_82540:
   9753 	case WM_T_82545:
   9754 	case WM_T_82545_3:
   9755 	case WM_T_82546:
   9756 	case WM_T_82546_3:
   9757 	case WM_T_82541:
   9758 	case WM_T_82541_2:
   9759 	case WM_T_82547:
   9760 	case WM_T_82547_2:
   9761 	case WM_T_82571:	/* reset 100us */
   9762 	case WM_T_82572:
   9763 	case WM_T_82573:
   9764 	case WM_T_82574:
   9765 	case WM_T_82575:
   9766 	case WM_T_82576:
   9767 	case WM_T_82580:
   9768 	case WM_T_I350:
   9769 	case WM_T_I354:
   9770 	case WM_T_I210:
   9771 	case WM_T_I211:
   9772 	case WM_T_82583:
   9773 	case WM_T_80003:
   9774 		/* generic reset */
   9775 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9776 		CSR_WRITE_FLUSH(sc);
   9777 		delay(20000);
   9778 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9779 		CSR_WRITE_FLUSH(sc);
   9780 		delay(20000);
   9781 
   9782 		if ((sc->sc_type == WM_T_82541)
   9783 		    || (sc->sc_type == WM_T_82541_2)
   9784 		    || (sc->sc_type == WM_T_82547)
   9785 		    || (sc->sc_type == WM_T_82547_2)) {
    9786 			/* Workarounds for IGP are done in igp_reset() */
   9787 			/* XXX add code to set LED after phy reset */
   9788 		}
   9789 		break;
   9790 	case WM_T_ICH8:
   9791 	case WM_T_ICH9:
   9792 	case WM_T_ICH10:
   9793 	case WM_T_PCH:
   9794 	case WM_T_PCH2:
   9795 	case WM_T_PCH_LPT:
   9796 	case WM_T_PCH_SPT:
   9797 	case WM_T_PCH_CNP:
   9798 		/* generic reset */
   9799 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9800 		CSR_WRITE_FLUSH(sc);
   9801 		delay(100);
   9802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9803 		CSR_WRITE_FLUSH(sc);
   9804 		delay(150);
   9805 		break;
   9806 	default:
   9807 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9808 		    __func__);
   9809 		break;
   9810 	}
   9811 
   9812 	sc->phy.release(sc);
   9813 
   9814 	/* get_cfg_done */
   9815 	wm_get_cfg_done(sc);
   9816 
   9817 	/* extra setup */
   9818 	switch (sc->sc_type) {
   9819 	case WM_T_82542_2_0:
   9820 	case WM_T_82542_2_1:
   9821 	case WM_T_82543:
   9822 	case WM_T_82544:
   9823 	case WM_T_82540:
   9824 	case WM_T_82545:
   9825 	case WM_T_82545_3:
   9826 	case WM_T_82546:
   9827 	case WM_T_82546_3:
   9828 	case WM_T_82541_2:
   9829 	case WM_T_82547_2:
   9830 	case WM_T_82571:
   9831 	case WM_T_82572:
   9832 	case WM_T_82573:
   9833 	case WM_T_82574:
   9834 	case WM_T_82583:
   9835 	case WM_T_82575:
   9836 	case WM_T_82576:
   9837 	case WM_T_82580:
   9838 	case WM_T_I350:
   9839 	case WM_T_I354:
   9840 	case WM_T_I210:
   9841 	case WM_T_I211:
   9842 	case WM_T_80003:
   9843 		/* null */
   9844 		break;
   9845 	case WM_T_82541:
   9846 	case WM_T_82547:
    9847 		/* XXX Actively configure the LED after PHY reset */
   9848 		break;
   9849 	case WM_T_ICH8:
   9850 	case WM_T_ICH9:
   9851 	case WM_T_ICH10:
   9852 	case WM_T_PCH:
   9853 	case WM_T_PCH2:
   9854 	case WM_T_PCH_LPT:
   9855 	case WM_T_PCH_SPT:
   9856 	case WM_T_PCH_CNP:
   9857 		wm_phy_post_reset(sc);
   9858 		break;
   9859 	default:
   9860 		panic("%s: unknown type\n", __func__);
   9861 		break;
   9862 	}
   9863 }
   9864 
   9865 /*
   9866  * Setup sc_phytype and mii_{read|write}reg.
   9867  *
    9868  *  To identify the PHY type, the correct read/write functions must be
    9869  * selected. To select them, the PCI ID or the MAC type is required,
    9870  * without accessing PHY registers.
    9871  *
    9872  *  On the first call of this function, the PHY ID is not known yet.
    9873  * Check the PCI ID or the MAC type. The list of PCI IDs may not be
    9874  * perfect, so the result might be incorrect.
    9875  *
    9876  *  On the second call, the PHY OUI and model are used to identify the
    9877  * PHY type. It might not be perfect because of missing comparison
    9878  * entries, but it would be better than the first call.
    9879  *
    9880  *  If the newly detected result differs from the previous assumption,
    9881  * a diagnostic message is printed.
   9882  */
   9883 static void
   9884 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9885     uint16_t phy_model)
   9886 {
   9887 	device_t dev = sc->sc_dev;
   9888 	struct mii_data *mii = &sc->sc_mii;
   9889 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9890 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9891 	mii_readreg_t new_readreg;
   9892 	mii_writereg_t new_writereg;
   9893 
   9894 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9895 		device_xname(sc->sc_dev), __func__));
   9896 
   9897 	if (mii->mii_readreg == NULL) {
   9898 		/*
   9899 		 *  This is the first call of this function. For ICH and PCH
   9900 		 * variants, it's difficult to determine the PHY access method
   9901 		 * by sc_type, so use the PCI product ID for some devices.
   9902 		 */
   9903 
   9904 		switch (sc->sc_pcidevid) {
   9905 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9906 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9907 			/* 82577 */
   9908 			new_phytype = WMPHY_82577;
   9909 			break;
   9910 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9911 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9912 			/* 82578 */
   9913 			new_phytype = WMPHY_82578;
   9914 			break;
   9915 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9916 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9917 			/* 82579 */
   9918 			new_phytype = WMPHY_82579;
   9919 			break;
   9920 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9921 		case PCI_PRODUCT_INTEL_82801I_BM:
   9922 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9923 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9924 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9925 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9926 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9927 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9928 			/* ICH8, 9, 10 with 82567 */
   9929 			new_phytype = WMPHY_BM;
   9930 			break;
   9931 		default:
   9932 			break;
   9933 		}
   9934 	} else {
   9935 		/* It's not the first call. Use PHY OUI and model */
   9936 		switch (phy_oui) {
   9937 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9938 			switch (phy_model) {
   9939 			case 0x0004: /* XXX */
   9940 				new_phytype = WMPHY_82578;
   9941 				break;
   9942 			default:
   9943 				break;
   9944 			}
   9945 			break;
   9946 		case MII_OUI_xxMARVELL:
   9947 			switch (phy_model) {
   9948 			case MII_MODEL_xxMARVELL_I210:
   9949 				new_phytype = WMPHY_I210;
   9950 				break;
   9951 			case MII_MODEL_xxMARVELL_E1011:
   9952 			case MII_MODEL_xxMARVELL_E1000_3:
   9953 			case MII_MODEL_xxMARVELL_E1000_5:
   9954 			case MII_MODEL_xxMARVELL_E1112:
   9955 				new_phytype = WMPHY_M88;
   9956 				break;
   9957 			case MII_MODEL_xxMARVELL_E1149:
   9958 				new_phytype = WMPHY_BM;
   9959 				break;
   9960 			case MII_MODEL_xxMARVELL_E1111:
   9961 			case MII_MODEL_xxMARVELL_I347:
   9962 			case MII_MODEL_xxMARVELL_E1512:
   9963 			case MII_MODEL_xxMARVELL_E1340M:
   9964 			case MII_MODEL_xxMARVELL_E1543:
   9965 				new_phytype = WMPHY_M88;
   9966 				break;
   9967 			case MII_MODEL_xxMARVELL_I82563:
   9968 				new_phytype = WMPHY_GG82563;
   9969 				break;
   9970 			default:
   9971 				break;
   9972 			}
   9973 			break;
   9974 		case MII_OUI_INTEL:
   9975 			switch (phy_model) {
   9976 			case MII_MODEL_INTEL_I82577:
   9977 				new_phytype = WMPHY_82577;
   9978 				break;
   9979 			case MII_MODEL_INTEL_I82579:
   9980 				new_phytype = WMPHY_82579;
   9981 				break;
   9982 			case MII_MODEL_INTEL_I217:
   9983 				new_phytype = WMPHY_I217;
   9984 				break;
   9985 			case MII_MODEL_INTEL_I82580:
   9986 			case MII_MODEL_INTEL_I350:
   9987 				new_phytype = WMPHY_82580;
   9988 				break;
   9989 			default:
   9990 				break;
   9991 			}
   9992 			break;
   9993 		case MII_OUI_yyINTEL:
   9994 			switch (phy_model) {
   9995 			case MII_MODEL_yyINTEL_I82562G:
   9996 			case MII_MODEL_yyINTEL_I82562EM:
   9997 			case MII_MODEL_yyINTEL_I82562ET:
   9998 				new_phytype = WMPHY_IFE;
   9999 				break;
   10000 			case MII_MODEL_yyINTEL_IGP01E1000:
   10001 				new_phytype = WMPHY_IGP;
   10002 				break;
   10003 			case MII_MODEL_yyINTEL_I82566:
   10004 				new_phytype = WMPHY_IGP_3;
   10005 				break;
   10006 			default:
   10007 				break;
   10008 			}
   10009 			break;
   10010 		default:
   10011 			break;
   10012 		}
   10013 		if (new_phytype == WMPHY_UNKNOWN)
   10014 			aprint_verbose_dev(dev,
   10015 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10016 			    __func__, phy_oui, phy_model);
   10017 
   10018 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10019 		    && (sc->sc_phytype != new_phytype)) {
    10020 			aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    10021 			    " was incorrect. PHY type from PHY ID = %u\n",
   10022 			    sc->sc_phytype, new_phytype);
   10023 		}
   10024 	}
   10025 
   10026 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10027 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10028 		/* SGMII */
   10029 		new_readreg = wm_sgmii_readreg;
   10030 		new_writereg = wm_sgmii_writereg;
    10031 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10032 		/* BM2 (phyaddr == 1) */
   10033 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10034 		    && (new_phytype != WMPHY_BM)
   10035 		    && (new_phytype != WMPHY_UNKNOWN))
   10036 			doubt_phytype = new_phytype;
   10037 		new_phytype = WMPHY_BM;
   10038 		new_readreg = wm_gmii_bm_readreg;
   10039 		new_writereg = wm_gmii_bm_writereg;
   10040 	} else if (sc->sc_type >= WM_T_PCH) {
   10041 		/* All PCH* use _hv_ */
   10042 		new_readreg = wm_gmii_hv_readreg;
   10043 		new_writereg = wm_gmii_hv_writereg;
   10044 	} else if (sc->sc_type >= WM_T_ICH8) {
   10045 		/* non-82567 ICH8, 9 and 10 */
   10046 		new_readreg = wm_gmii_i82544_readreg;
   10047 		new_writereg = wm_gmii_i82544_writereg;
   10048 	} else if (sc->sc_type >= WM_T_80003) {
   10049 		/* 80003 */
   10050 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10051 		    && (new_phytype != WMPHY_GG82563)
   10052 		    && (new_phytype != WMPHY_UNKNOWN))
   10053 			doubt_phytype = new_phytype;
   10054 		new_phytype = WMPHY_GG82563;
   10055 		new_readreg = wm_gmii_i80003_readreg;
   10056 		new_writereg = wm_gmii_i80003_writereg;
   10057 	} else if (sc->sc_type >= WM_T_I210) {
   10058 		/* I210 and I211 */
   10059 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10060 		    && (new_phytype != WMPHY_I210)
   10061 		    && (new_phytype != WMPHY_UNKNOWN))
   10062 			doubt_phytype = new_phytype;
   10063 		new_phytype = WMPHY_I210;
   10064 		new_readreg = wm_gmii_gs40g_readreg;
   10065 		new_writereg = wm_gmii_gs40g_writereg;
   10066 	} else if (sc->sc_type >= WM_T_82580) {
   10067 		/* 82580, I350 and I354 */
   10068 		new_readreg = wm_gmii_82580_readreg;
   10069 		new_writereg = wm_gmii_82580_writereg;
   10070 	} else if (sc->sc_type >= WM_T_82544) {
    10071 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10072 		new_readreg = wm_gmii_i82544_readreg;
   10073 		new_writereg = wm_gmii_i82544_writereg;
   10074 	} else {
   10075 		new_readreg = wm_gmii_i82543_readreg;
   10076 		new_writereg = wm_gmii_i82543_writereg;
   10077 	}
   10078 
   10079 	if (new_phytype == WMPHY_BM) {
   10080 		/* All BM use _bm_ */
   10081 		new_readreg = wm_gmii_bm_readreg;
   10082 		new_writereg = wm_gmii_bm_writereg;
   10083 	}
   10084 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10085 		/* All PCH* use _hv_ */
   10086 		new_readreg = wm_gmii_hv_readreg;
   10087 		new_writereg = wm_gmii_hv_writereg;
   10088 	}
   10089 
   10090 	/* Diag output */
   10091 	if (doubt_phytype != WMPHY_UNKNOWN)
   10092 		aprint_error_dev(dev, "Assumed new PHY type was "
   10093 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10094 		    new_phytype);
   10095 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10096 	    && (sc->sc_phytype != new_phytype))
    10097 		aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    10098 		    " was incorrect. New PHY type = %u\n",
   10099 		    sc->sc_phytype, new_phytype);
   10100 
   10101 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10102 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10103 
   10104 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10105 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10106 		    "function was incorrect.\n");
   10107 
   10108 	/* Update now */
   10109 	sc->sc_phytype = new_phytype;
   10110 	mii->mii_readreg = new_readreg;
   10111 	mii->mii_writereg = new_writereg;
   10112 	if (new_readreg == wm_gmii_hv_readreg) {
   10113 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10114 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10115 	} else if (new_readreg == wm_sgmii_readreg) {
   10116 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10117 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10118 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10119 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10120 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10121 	}
   10122 }
   10123 
   10124 /*
   10125  * wm_get_phy_id_82575:
   10126  *
   10127  * Return PHY ID. Return -1 if it failed.
   10128  */
   10129 static int
   10130 wm_get_phy_id_82575(struct wm_softc *sc)
   10131 {
   10132 	uint32_t reg;
   10133 	int phyid = -1;
   10134 
   10135 	/* XXX */
   10136 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10137 		return -1;
   10138 
   10139 	if (wm_sgmii_uses_mdio(sc)) {
   10140 		switch (sc->sc_type) {
   10141 		case WM_T_82575:
   10142 		case WM_T_82576:
   10143 			reg = CSR_READ(sc, WMREG_MDIC);
   10144 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10145 			break;
   10146 		case WM_T_82580:
   10147 		case WM_T_I350:
   10148 		case WM_T_I354:
   10149 		case WM_T_I210:
   10150 		case WM_T_I211:
   10151 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10152 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10153 			break;
   10154 		default:
   10155 			return -1;
   10156 		}
   10157 	}
   10158 
   10159 	return phyid;
   10160 }
    10161 
   10163 /*
   10164  * wm_gmii_mediainit:
   10165  *
   10166  *	Initialize media for use on 1000BASE-T devices.
   10167  */
   10168 static void
   10169 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10170 {
   10171 	device_t dev = sc->sc_dev;
   10172 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10173 	struct mii_data *mii = &sc->sc_mii;
   10174 	uint32_t reg;
   10175 
   10176 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10177 		device_xname(sc->sc_dev), __func__));
   10178 
   10179 	/* We have GMII. */
   10180 	sc->sc_flags |= WM_F_HAS_MII;
   10181 
   10182 	if (sc->sc_type == WM_T_80003)
   10183 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10184 	else
   10185 		sc->sc_tipg = TIPG_1000T_DFLT;
   10186 
   10187 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10188 	if ((sc->sc_type == WM_T_82580)
   10189 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10190 	    || (sc->sc_type == WM_T_I211)) {
   10191 		reg = CSR_READ(sc, WMREG_PHPM);
   10192 		reg &= ~PHPM_GO_LINK_D;
   10193 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10194 	}
   10195 
   10196 	/*
   10197 	 * Let the chip set speed/duplex on its own based on
   10198 	 * signals from the PHY.
   10199 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10200 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10201 	 */
   10202 	sc->sc_ctrl |= CTRL_SLU;
   10203 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10204 
   10205 	/* Initialize our media structures and probe the GMII. */
   10206 	mii->mii_ifp = ifp;
   10207 
   10208 	mii->mii_statchg = wm_gmii_statchg;
   10209 
   10210 	/* get PHY control from SMBus to PCIe */
   10211 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10212 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10213 	    || (sc->sc_type == WM_T_PCH_CNP))
   10214 		wm_init_phy_workarounds_pchlan(sc);
   10215 
   10216 	wm_gmii_reset(sc);
   10217 
   10218 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10219 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10220 	    wm_gmii_mediastatus);
   10221 
   10222 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10223 	    || (sc->sc_type == WM_T_82580)
   10224 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10225 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10226 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10227 			/* Attach only one port */
   10228 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10229 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10230 		} else {
   10231 			int i, id;
   10232 			uint32_t ctrl_ext;
   10233 
   10234 			id = wm_get_phy_id_82575(sc);
   10235 			if (id != -1) {
   10236 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10237 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10238 			}
   10239 			if ((id == -1)
   10240 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10241 				/* Power on sgmii phy if it is disabled */
   10242 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10243 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10244 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10245 				CSR_WRITE_FLUSH(sc);
   10246 				delay(300*1000); /* XXX too long */
   10247 
    10248 				/* PHY addresses from 1 to 7 */
   10249 				for (i = 1; i < 8; i++)
   10250 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10251 					    0xffffffff, i, MII_OFFSET_ANY,
   10252 					    MIIF_DOPAUSE);
   10253 
   10254 				/* restore previous sfp cage power state */
   10255 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10256 			}
   10257 		}
   10258 	} else
   10259 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10260 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10261 
   10262 	/*
    10263 	 * If the MAC is PCH2 or newer and we failed to detect a MII PHY,
    10264 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10265 	 */
   10266 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10267 		|| (sc->sc_type == WM_T_PCH_SPT)
   10268 		|| (sc->sc_type == WM_T_PCH_CNP))
   10269 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10270 		wm_set_mdio_slow_mode_hv(sc);
   10271 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10272 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10273 	}
   10274 
   10275 	/*
   10276 	 * (For ICH8 variants)
   10277 	 * If PHY detection failed, use BM's r/w function and retry.
   10278 	 */
   10279 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10280 		/* if failed, retry with *_bm_* */
   10281 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10282 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10283 		    sc->sc_phytype);
   10284 		sc->sc_phytype = WMPHY_BM;
   10285 		mii->mii_readreg = wm_gmii_bm_readreg;
   10286 		mii->mii_writereg = wm_gmii_bm_writereg;
   10287 
   10288 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10289 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10290 	}
   10291 
   10292 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10293 		/* No PHY was found */
   10294 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10295 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10296 		sc->sc_phytype = WMPHY_NONE;
   10297 	} else {
   10298 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10299 
   10300 		/*
    10301 		 * PHY found! Check the PHY type again with the second call
    10302 		 * of wm_gmii_setup_phytype().
   10303 		 */
   10304 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10305 		    child->mii_mpd_model);
   10306 
   10307 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10308 	}
   10309 }
   10310 
   10311 /*
   10312  * wm_gmii_mediachange:	[ifmedia interface function]
   10313  *
   10314  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10315  */
   10316 static int
   10317 wm_gmii_mediachange(struct ifnet *ifp)
   10318 {
   10319 	struct wm_softc *sc = ifp->if_softc;
   10320 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10321 	int rc;
   10322 
   10323 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10324 		device_xname(sc->sc_dev), __func__));
   10325 	if ((ifp->if_flags & IFF_UP) == 0)
   10326 		return 0;
   10327 
   10328 	/* Disable D0 LPLU. */
   10329 	wm_lplu_d0_disable(sc);
   10330 
   10331 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10332 	sc->sc_ctrl |= CTRL_SLU;
   10333 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10334 	    || (sc->sc_type > WM_T_82543)) {
   10335 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10336 	} else {
   10337 		sc->sc_ctrl &= ~CTRL_ASDE;
   10338 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10339 		if (ife->ifm_media & IFM_FDX)
   10340 			sc->sc_ctrl |= CTRL_FD;
   10341 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10342 		case IFM_10_T:
   10343 			sc->sc_ctrl |= CTRL_SPEED_10;
   10344 			break;
   10345 		case IFM_100_TX:
   10346 			sc->sc_ctrl |= CTRL_SPEED_100;
   10347 			break;
   10348 		case IFM_1000_T:
   10349 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10350 			break;
   10351 		case IFM_NONE:
   10352 			/* There is no specific setting for IFM_NONE */
   10353 			break;
   10354 		default:
   10355 			panic("wm_gmii_mediachange: bad media 0x%x",
   10356 			    ife->ifm_media);
   10357 		}
   10358 	}
   10359 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10360 	CSR_WRITE_FLUSH(sc);
   10361 	if (sc->sc_type <= WM_T_82543)
   10362 		wm_gmii_reset(sc);
   10363 
   10364 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10365 		return 0;
   10366 	return rc;
   10367 }
   10368 
   10369 /*
   10370  * wm_gmii_mediastatus:	[ifmedia interface function]
   10371  *
   10372  *	Get the current interface media status on a 1000BASE-T device.
   10373  */
   10374 static void
   10375 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10376 {
   10377 	struct wm_softc *sc = ifp->if_softc;
   10378 
   10379 	ether_mediastatus(ifp, ifmr);
   10380 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10381 	    | sc->sc_flowflags;
   10382 }
   10383 
   10384 #define	MDI_IO		CTRL_SWDPIN(2)
   10385 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10386 #define	MDI_CLK		CTRL_SWDPIN(3)
   10387 
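/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang a value out on the MDIO pin, MSB first: for each bit,
 *	drive MDI_IO and pulse MDI_CLK.  Used on chips (such as the 82543)
 *	which have no MDIC register.
 */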
   10388 static void
   10389 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10390 {
   10391 	uint32_t i, v;
   10392 
   10393 	v = CSR_READ(sc, WMREG_CTRL);
   10394 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10395 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10396 
   10397 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10398 		if (data & i)
   10399 			v |= MDI_IO;
   10400 		else
   10401 			v &= ~MDI_IO;
   10402 		CSR_WRITE(sc, WMREG_CTRL, v);
   10403 		CSR_WRITE_FLUSH(sc);
   10404 		delay(10);
   10405 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10406 		CSR_WRITE_FLUSH(sc);
   10407 		delay(10);
   10408 		CSR_WRITE(sc, WMREG_CTRL, v);
   10409 		CSR_WRITE_FLUSH(sc);
   10410 		delay(10);
   10411 	}
   10412 }
   10413 
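/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the MDIO pin, MSB first: clock
 *	through the turnaround bits, then sample MDI_IO on each MDI_CLK
 *	pulse.
 */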
   10414 static uint16_t
   10415 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10416 {
   10417 	uint32_t v, i;
   10418 	uint16_t data = 0;
   10419 
   10420 	v = CSR_READ(sc, WMREG_CTRL);
   10421 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10422 	v |= CTRL_SWDPIO(3);
   10423 
   10424 	CSR_WRITE(sc, WMREG_CTRL, v);
   10425 	CSR_WRITE_FLUSH(sc);
   10426 	delay(10);
   10427 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10428 	CSR_WRITE_FLUSH(sc);
   10429 	delay(10);
   10430 	CSR_WRITE(sc, WMREG_CTRL, v);
   10431 	CSR_WRITE_FLUSH(sc);
   10432 	delay(10);
   10433 
   10434 	for (i = 0; i < 16; i++) {
   10435 		data <<= 1;
   10436 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10437 		CSR_WRITE_FLUSH(sc);
   10438 		delay(10);
   10439 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10440 			data |= 1;
   10441 		CSR_WRITE(sc, WMREG_CTRL, v);
   10442 		CSR_WRITE_FLUSH(sc);
   10443 		delay(10);
   10444 	}
   10445 
   10446 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10447 	CSR_WRITE_FLUSH(sc);
   10448 	delay(10);
   10449 	CSR_WRITE(sc, WMREG_CTRL, v);
   10450 	CSR_WRITE_FLUSH(sc);
   10451 	delay(10);
   10452 
   10453 	return data;
   10454 }
   10455 
   10456 #undef MDI_IO
   10457 #undef MDI_DIR
   10458 #undef MDI_CLK
   10459 
   10460 /*
   10461  * wm_gmii_i82543_readreg:	[mii interface function]
   10462  *
   10463  *	Read a PHY register on the GMII (i82543 version).
   10464  */
   10465 static int
   10466 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10467 {
   10468 	struct wm_softc *sc = device_private(dev);
   10469 
   10470 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10471 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10472 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10473 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10474 
   10475 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10476 		device_xname(dev), phy, reg, *val));
   10477 
   10478 	return 0;
   10479 }
   10480 
   10481 /*
   10482  * wm_gmii_i82543_writereg:	[mii interface function]
   10483  *
   10484  *	Write a PHY register on the GMII (i82543 version).
   10485  */
   10486 static int
   10487 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10488 {
   10489 	struct wm_softc *sc = device_private(dev);
   10490 
   10491 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10492 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10493 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10494 	    (MII_COMMAND_START << 30), 32);
   10495 
   10496 	return 0;
   10497 }
   10498 
   10499 /*
   10500  * wm_gmii_mdic_readreg:	[mii interface function]
   10501  *
   10502  *	Read a PHY register on the GMII.
   10503  */
   10504 static int
   10505 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10506 {
   10507 	struct wm_softc *sc = device_private(dev);
   10508 	uint32_t mdic = 0;
   10509 	int i;
   10510 
   10511 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10512 	    && (reg > MII_ADDRMASK)) {
   10513 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10514 		    __func__, sc->sc_phytype, reg);
   10515 		reg &= MII_ADDRMASK;
   10516 	}
   10517 
   10518 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10519 	    MDIC_REGADD(reg));
   10520 
   10521 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10522 		delay(50);
   10523 		mdic = CSR_READ(sc, WMREG_MDIC);
   10524 		if (mdic & MDIC_READY)
   10525 			break;
   10526 	}
   10527 
   10528 	if ((mdic & MDIC_READY) == 0) {
   10529 		DPRINTF(WM_DEBUG_GMII,
   10530 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10531 			device_xname(dev), phy, reg));
   10532 		return ETIMEDOUT;
   10533 	} else if (mdic & MDIC_E) {
   10534 		/* This is normal if no PHY is present. */
   10535 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10536 			device_xname(sc->sc_dev), phy, reg));
   10537 		return -1;
   10538 	} else
   10539 		*val = MDIC_DATA(mdic);
   10540 
   10541 	/*
   10542 	 * Allow some time after each MDIC transaction to avoid
   10543 	 * reading duplicate data in the next MDIC transaction.
   10544 	 */
   10545 	if (sc->sc_type == WM_T_PCH2)
   10546 		delay(100);
   10547 
   10548 	return 0;
   10549 }
   10550 
   10551 /*
   10552  * wm_gmii_mdic_writereg:	[mii interface function]
   10553  *
   10554  *	Write a PHY register on the GMII.
   10555  */
   10556 static int
   10557 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10558 {
   10559 	struct wm_softc *sc = device_private(dev);
   10560 	uint32_t mdic = 0;
   10561 	int i;
   10562 
   10563 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10564 	    && (reg > MII_ADDRMASK)) {
   10565 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10566 		    __func__, sc->sc_phytype, reg);
   10567 		reg &= MII_ADDRMASK;
   10568 	}
   10569 
   10570 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10571 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10572 
   10573 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10574 		delay(50);
   10575 		mdic = CSR_READ(sc, WMREG_MDIC);
   10576 		if (mdic & MDIC_READY)
   10577 			break;
   10578 	}
   10579 
   10580 	if ((mdic & MDIC_READY) == 0) {
   10581 		DPRINTF(WM_DEBUG_GMII,
   10582 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10583 			device_xname(dev), phy, reg));
   10584 		return ETIMEDOUT;
   10585 	} else if (mdic & MDIC_E) {
   10586 		DPRINTF(WM_DEBUG_GMII,
   10587 		    ("%s: MDIC write error: phy %d reg %d\n",
   10588 			device_xname(dev), phy, reg));
   10589 		return -1;
   10590 	}
   10591 
   10592 	/*
   10593 	 * Allow some time after each MDIC transaction to avoid
   10594 	 * reading duplicate data in the next MDIC transaction.
   10595 	 */
   10596 	if (sc->sc_type == WM_T_PCH2)
   10597 		delay(100);
   10598 
   10599 	return 0;
   10600 }
   10601 
   10602 /*
   10603  * wm_gmii_i82544_readreg:	[mii interface function]
   10604  *
   10605  *	Read a PHY register on the GMII.
   10606  */
   10607 static int
   10608 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10609 {
   10610 	struct wm_softc *sc = device_private(dev);
   10611 	int rv;
   10612 
   10613 	if (sc->phy.acquire(sc)) {
   10614 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10615 		return -1;
   10616 	}
   10617 
   10618 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10619 
   10620 	sc->phy.release(sc);
   10621 
   10622 	return rv;
   10623 }
   10624 
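/*
 * For IGP PHYs, registers above BME1000_MAX_MULTI_PAGE_REG are reached by
 * first writing the page (encoded in the upper bits of "reg") to
 * MII_IGPHY_PAGE_SELECT; other PHY types are accessed directly.
 */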
   10625 static int
   10626 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10627 {
   10628 	struct wm_softc *sc = device_private(dev);
   10629 
   10630 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10631 		switch (sc->sc_phytype) {
   10632 		case WMPHY_IGP:
   10633 		case WMPHY_IGP_2:
   10634 		case WMPHY_IGP_3:
   10635 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10636 			    reg);
   10637 			break;
   10638 		default:
   10639 #ifdef WM_DEBUG
   10640 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10641 			    __func__, sc->sc_phytype, reg);
   10642 #endif
   10643 			break;
   10644 		}
   10645 	}
   10646 
   10647 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10648 
   10649 	return 0;
   10650 }
   10651 
   10652 /*
   10653  * wm_gmii_i82544_writereg:	[mii interface function]
   10654  *
   10655  *	Write a PHY register on the GMII.
   10656  */
   10657 static int
   10658 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10659 {
   10660 	struct wm_softc *sc = device_private(dev);
   10661 	int rv;
   10662 
   10663 	if (sc->phy.acquire(sc)) {
   10664 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10665 		return -1;
   10666 	}
   10667 
   10668 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10669 	sc->phy.release(sc);
   10670 
   10671 	return rv;
   10672 }
   10673 
   10674 static int
   10675 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10676 {
   10677 	struct wm_softc *sc = device_private(dev);
   10678 
   10679 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10680 		switch (sc->sc_phytype) {
   10681 		case WMPHY_IGP:
   10682 		case WMPHY_IGP_2:
   10683 		case WMPHY_IGP_3:
   10684 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10685 			    reg);
   10686 			break;
   10687 		default:
   10688 #ifdef WM_DEBUG
    10689 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10690 			    __func__, sc->sc_phytype, reg);
   10691 #endif
   10692 			break;
   10693 		}
   10694 	}
   10695 
   10696 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10697 
   10698 	return 0;
   10699 }
   10700 
   10701 /*
   10702  * wm_gmii_i80003_readreg:	[mii interface function]
   10703  *
    10704  *	Read a PHY register on the Kumeran interface (80003).
    10705  * This could be handled by the PHY layer if we didn't have to lock the
    10706  * resource ...
   10707  */
   10708 static int
   10709 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10710 {
   10711 	struct wm_softc *sc = device_private(dev);
   10712 	int page_select;
   10713 	uint16_t temp, temp2;
   10714 	int rv = 0;
   10715 
   10716 	if (phy != 1) /* only one PHY on kumeran bus */
   10717 		return -1;
   10718 
   10719 	if (sc->phy.acquire(sc)) {
   10720 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10721 		return -1;
   10722 	}
   10723 
   10724 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10725 		page_select = GG82563_PHY_PAGE_SELECT;
   10726 	else {
   10727 		/*
   10728 		 * Use Alternative Page Select register to access registers
   10729 		 * 30 and 31.
   10730 		 */
   10731 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10732 	}
   10733 	temp = reg >> GG82563_PAGE_SHIFT;
   10734 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10735 		goto out;
   10736 
   10737 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10738 		/*
    10739 		 * Wait an extra 200us to work around a bug with the ready
    10740 		 * bit in the MDIC register.
   10741 		 */
   10742 		delay(200);
   10743 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10744 		if (temp2 != temp) {
   10745 			device_printf(dev, "%s failed\n", __func__);
   10746 			rv = -1;
   10747 			goto out;
   10748 		}
   10749 		delay(200);
   10750 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10751 		delay(200);
   10752 	} else
   10753 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10754 
   10755 out:
   10756 	sc->phy.release(sc);
   10757 	return rv;
   10758 }
   10759 
   10760 /*
   10761  * wm_gmii_i80003_writereg:	[mii interface function]
   10762  *
    10763  *	Write a PHY register on the Kumeran interface (80003).
    10764  * This could be handled by the PHY layer if we didn't have to lock the
    10765  * resource ...
   10766  */
   10767 static int
   10768 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10769 {
   10770 	struct wm_softc *sc = device_private(dev);
   10771 	int page_select, rv;
   10772 	uint16_t temp, temp2;
   10773 
   10774 	if (phy != 1) /* only one PHY on kumeran bus */
   10775 		return -1;
   10776 
   10777 	if (sc->phy.acquire(sc)) {
   10778 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10779 		return -1;
   10780 	}
   10781 
   10782 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10783 		page_select = GG82563_PHY_PAGE_SELECT;
   10784 	else {
   10785 		/*
   10786 		 * Use Alternative Page Select register to access registers
   10787 		 * 30 and 31.
   10788 		 */
   10789 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10790 	}
   10791 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10792 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10793 		goto out;
   10794 
   10795 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10796 		/*
    10797 		 * Wait an extra 200us to work around a bug with the ready
    10798 		 * bit in the MDIC register.
   10799 		 */
   10800 		delay(200);
   10801 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10802 		if (temp2 != temp) {
   10803 			device_printf(dev, "%s failed\n", __func__);
   10804 			rv = -1;
   10805 			goto out;
   10806 		}
   10807 		delay(200);
   10808 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10809 		delay(200);
   10810 	} else
   10811 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10812 
   10813 out:
   10814 	sc->phy.release(sc);
   10815 	return rv;
   10816 }
   10817 
   10818 /*
   10819  * wm_gmii_bm_readreg:	[mii interface function]
   10820  *
    10821  *	Read a BM PHY register.
    10822  * This could be handled by the PHY layer if we didn't have to lock the
    10823  * resource ...
   10824  */
   10825 static int
   10826 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10827 {
   10828 	struct wm_softc *sc = device_private(dev);
   10829 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10830 	int rv;
   10831 
   10832 	if (sc->phy.acquire(sc)) {
   10833 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10834 		return -1;
   10835 	}
   10836 
   10837 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10838 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10839 		    || (reg == 31)) ? 1 : phy;
   10840 	/* Page 800 works differently than the rest so it has its own func */
   10841 	if (page == BM_WUC_PAGE) {
   10842 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10843 		goto release;
   10844 	}
   10845 
   10846 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10847 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10848 		    && (sc->sc_type != WM_T_82583))
   10849 			rv = wm_gmii_mdic_writereg(dev, phy,
   10850 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10851 		else
   10852 			rv = wm_gmii_mdic_writereg(dev, phy,
   10853 			    BME1000_PHY_PAGE_SELECT, page);
   10854 		if (rv != 0)
   10855 			goto release;
   10856 	}
   10857 
   10858 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10859 
   10860 release:
   10861 	sc->phy.release(sc);
   10862 	return rv;
   10863 }
   10864 
   10865 /*
   10866  * wm_gmii_bm_writereg:	[mii interface function]
   10867  *
    10868  *	Write a BM PHY register.
    10869  * This could be handled by the PHY layer if we didn't have to lock the
    10870  * resource ...
   10871  */
   10872 static int
   10873 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10874 {
   10875 	struct wm_softc *sc = device_private(dev);
   10876 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10877 	int rv;
   10878 
   10879 	if (sc->phy.acquire(sc)) {
   10880 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10881 		return -1;
   10882 	}
   10883 
   10884 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10885 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10886 		    || (reg == 31)) ? 1 : phy;
   10887 	/* Page 800 works differently than the rest so it has its own func */
   10888 	if (page == BM_WUC_PAGE) {
   10889 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10890 		goto release;
   10891 	}
   10892 
   10893 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10894 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10895 		    && (sc->sc_type != WM_T_82583))
   10896 			rv = wm_gmii_mdic_writereg(dev, phy,
   10897 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10898 		else
   10899 			rv = wm_gmii_mdic_writereg(dev, phy,
   10900 			    BME1000_PHY_PAGE_SELECT, page);
   10901 		if (rv != 0)
   10902 			goto release;
   10903 	}
   10904 
   10905 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10906 
   10907 release:
   10908 	sc->phy.release(sc);
   10909 	return rv;
   10910 }
   10911 
   10912 /*
   10913  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10914  *  @dev: pointer to the HW structure
   10915  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10916  *
   10917  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10918  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10919  */
   10920 static int
   10921 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10922 {
   10923 	uint16_t temp;
   10924 	int rv;
   10925 
   10926 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10927 		device_xname(dev), __func__));
   10928 
   10929 	if (!phy_regp)
   10930 		return -1;
   10931 
   10932 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10933 
   10934 	/* Select Port Control Registers page */
   10935 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10936 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10937 	if (rv != 0)
   10938 		return rv;
   10939 
   10940 	/* Read WUCE and save it */
   10941 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10942 	if (rv != 0)
   10943 		return rv;
   10944 
   10945 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10946 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10947 	 */
   10948 	temp = *phy_regp;
   10949 	temp |= BM_WUC_ENABLE_BIT;
   10950 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10951 
   10952 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10953 		return rv;
   10954 
   10955 	/* Select Host Wakeup Registers page - caller now able to write
   10956 	 * registers on the Wakeup registers page
   10957 	 */
   10958 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10959 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10960 }
   10961 
   10962 /*
   10963  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10964  *  @dev: pointer to the HW structure
   10965  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10966  *
   10967  *  Restore BM_WUC_ENABLE_REG to its original value.
   10968  *
   10969  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10970  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10971  *  caller.
   10972  */
   10973 static int
   10974 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10975 {
   10976 
   10977 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10978 		device_xname(dev), __func__));
   10979 
   10980 	if (!phy_regp)
   10981 		return -1;
   10982 
   10983 	/* Select Port Control Registers page */
   10984 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10985 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10986 
   10987 	/* Restore 769.17 to its original value */
   10988 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10989 
   10990 	return 0;
   10991 }
   10992 
   10993 /*
   10994  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10995  *  @sc: pointer to the HW structure
   10996  *  @offset: register offset to be read or written
   10997  *  @val: pointer to the data to read or write
   10998  *  @rd: determines if operation is read or write
   10999  *  @page_set: BM_WUC_PAGE already set and access enabled
   11000  *
   11001  *  Read the PHY register at offset and store the retrieved information in
   11002  *  data, or write data to PHY register at offset.  Note the procedure to
   11003  *  access the PHY wakeup registers is different than reading the other PHY
   11004  *  registers. It works as such:
   11005  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11006  *  2) Set page to 800 for host (801 if accessed from manageability firmware)
   11007  *  3) Write the address using the address opcode (0x11)
   11008  *  4) Read or write the data using the data opcode (0x12)
   11009  *  5) Restore 769.17.2 to its original value
   11010  *
   11011  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11012  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11013  *
   11014  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11015  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11016  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11017  */
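/*
 * As an illustrative sketch only (not a separate code path), reading a
 * wakeup register with page_set == false expands to roughly:
 *
 *	uint16_t wuce, data;
 *
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);	     - steps 1) and 2)
 *	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE,
 *	    BM_PHY_REG_NUM(offset));			     - step 3)
 *	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data); - step 4)
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);     - step 5)
 */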
   11018 static int
    11019 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11020 	bool page_set)
   11021 {
   11022 	struct wm_softc *sc = device_private(dev);
   11023 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11024 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11025 	uint16_t wuce;
   11026 	int rv = 0;
   11027 
   11028 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11029 		device_xname(dev), __func__));
   11030 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11031 	if ((sc->sc_type == WM_T_PCH)
   11032 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11033 		device_printf(dev,
   11034 		    "Attempting to access page %d while gig enabled.\n", page);
   11035 	}
   11036 
   11037 	if (!page_set) {
   11038 		/* Enable access to PHY wakeup registers */
   11039 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11040 		if (rv != 0) {
   11041 			device_printf(dev,
   11042 			    "%s: Could not enable PHY wakeup reg access\n",
   11043 			    __func__);
   11044 			return rv;
   11045 		}
   11046 	}
   11047 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11048 		device_xname(sc->sc_dev), __func__, page, regnum));
   11049 
   11050 	/*
    11051 	 * 3) and 4) Access the PHY wakeup register, as described in the
    11052 	 * function comment above.
   11053 	 */
   11054 
   11055 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11056 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11057 	if (rv != 0)
   11058 		return rv;
   11059 
   11060 	if (rd) {
   11061 		/* Read the Wakeup register page value using opcode 0x12 */
   11062 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11063 	} else {
   11064 		/* Write the Wakeup register page value using opcode 0x12 */
   11065 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11066 	}
   11067 	if (rv != 0)
   11068 		return rv;
   11069 
   11070 	if (!page_set)
   11071 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11072 
   11073 	return rv;
   11074 }
   11075 
   11076 /*
   11077  * wm_gmii_hv_readreg:	[mii interface function]
   11078  *
    11079  *	Read an HV (PCH and newer) PHY register.
    11080  * This could be handled by the PHY layer if we didn't have to lock the
    11081  * resource ...
   11082  */
   11083 static int
   11084 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11085 {
   11086 	struct wm_softc *sc = device_private(dev);
   11087 	int rv;
   11088 
   11089 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11090 		device_xname(dev), __func__));
   11091 	if (sc->phy.acquire(sc)) {
   11092 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11093 		return -1;
   11094 	}
   11095 
   11096 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11097 	sc->phy.release(sc);
   11098 	return rv;
   11099 }
   11100 
   11101 static int
   11102 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11103 {
   11104 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11105 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11106 	int rv;
   11107 
   11108 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11109 
   11110 	/* Page 800 works differently than the rest so it has its own func */
   11111 	if (page == BM_WUC_PAGE)
   11112 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11113 
   11114 	/*
   11115 	 * Lower than page 768 works differently than the rest so it has its
   11116 	 * own func
   11117 	 */
   11118 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11119 		printf("gmii_hv_readreg!!!\n");
   11120 		return -1;
   11121 	}
   11122 
   11123 	/*
   11124 	 * XXX I21[789] documents say that the SMBus Address register is at
   11125 	 * PHY address 01, Page 0 (not 768), Register 26.
   11126 	 */
   11127 	if (page == HV_INTC_FC_PAGE_START)
   11128 		page = 0;
   11129 
   11130 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11131 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11132 		    page << BME1000_PAGE_SHIFT);
   11133 		if (rv != 0)
   11134 			return rv;
   11135 	}
   11136 
   11137 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11138 }
   11139 
   11140 /*
   11141  * wm_gmii_hv_writereg:	[mii interface function]
   11142  *
    11143  *	Write an HV (PCH and newer) PHY register.
    11144  * This could be handled by the PHY layer if we didn't have to lock the
    11145  * resource ...
   11146  */
   11147 static int
   11148 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11149 {
   11150 	struct wm_softc *sc = device_private(dev);
   11151 	int rv;
   11152 
   11153 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11154 		device_xname(dev), __func__));
   11155 
   11156 	if (sc->phy.acquire(sc)) {
   11157 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11158 		return -1;
   11159 	}
   11160 
   11161 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11162 	sc->phy.release(sc);
   11163 
   11164 	return rv;
   11165 }
   11166 
   11167 static int
   11168 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11169 {
   11170 	struct wm_softc *sc = device_private(dev);
   11171 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11172 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11173 	int rv;
   11174 
   11175 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11176 
   11177 	/* Page 800 works differently than the rest so it has its own func */
   11178 	if (page == BM_WUC_PAGE)
   11179 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11180 		    false);
   11181 
   11182 	/*
    11183 	 * Pages lower than 768 work differently from the rest and would
    11184 	 * need their own handler, which is not implemented here.
   11185 	 */
   11186 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11187 		printf("gmii_hv_writereg!!!\n");
   11188 		return -1;
   11189 	}
   11190 
   11191 	{
   11192 		/*
   11193 		 * XXX I21[789] documents say that the SMBus Address register
   11194 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11195 		 */
   11196 		if (page == HV_INTC_FC_PAGE_START)
   11197 			page = 0;
   11198 
   11199 		/*
   11200 		 * XXX Workaround MDIO accesses being disabled after entering
   11201 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11202 		 * register is set)
   11203 		 */
   11204 		if (sc->sc_phytype == WMPHY_82578) {
   11205 			struct mii_softc *child;
   11206 
   11207 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11208 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11209 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11210 			    && ((val & (1 << 11)) != 0)) {
   11211 				printf("XXX need workaround\n");
   11212 			}
   11213 		}
   11214 
   11215 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11216 			rv = wm_gmii_mdic_writereg(dev, 1,
   11217 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11218 			if (rv != 0)
   11219 				return rv;
   11220 		}
   11221 	}
   11222 
   11223 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11224 }
   11225 
   11226 /*
   11227  * wm_gmii_82580_readreg:	[mii interface function]
   11228  *
   11229  *	Read a PHY register on the 82580 and I350.
   11230  * This could be handled by the PHY layer if we didn't have to lock the
    11231  * resource ...
   11232  */
   11233 static int
   11234 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11235 {
   11236 	struct wm_softc *sc = device_private(dev);
   11237 	int rv;
   11238 
   11239 	if (sc->phy.acquire(sc) != 0) {
   11240 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11241 		return -1;
   11242 	}
   11243 
   11244 #ifdef DIAGNOSTIC
   11245 	if (reg > MII_ADDRMASK) {
   11246 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11247 		    __func__, sc->sc_phytype, reg);
   11248 		reg &= MII_ADDRMASK;
   11249 	}
   11250 #endif
   11251 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11252 
   11253 	sc->phy.release(sc);
   11254 	return rv;
   11255 }
   11256 
   11257 /*
   11258  * wm_gmii_82580_writereg:	[mii interface function]
   11259  *
   11260  *	Write a PHY register on the 82580 and I350.
   11261  * This could be handled by the PHY layer if we didn't have to lock the
    11262  * resource ...
   11263  */
   11264 static int
   11265 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11266 {
   11267 	struct wm_softc *sc = device_private(dev);
   11268 	int rv;
   11269 
   11270 	if (sc->phy.acquire(sc) != 0) {
   11271 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11272 		return -1;
   11273 	}
   11274 
   11275 #ifdef DIAGNOSTIC
   11276 	if (reg > MII_ADDRMASK) {
   11277 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11278 		    __func__, sc->sc_phytype, reg);
   11279 		reg &= MII_ADDRMASK;
   11280 	}
   11281 #endif
   11282 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11283 
   11284 	sc->phy.release(sc);
   11285 	return rv;
   11286 }
   11287 
   11288 /*
   11289  * wm_gmii_gs40g_readreg:	[mii interface function]
   11290  *
    11291  *	Read a PHY register on the I210 and I211.
   11292  * This could be handled by the PHY layer if we didn't have to lock the
    11293  * resource ...
   11294  */
   11295 static int
   11296 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11297 {
   11298 	struct wm_softc *sc = device_private(dev);
   11299 	int page, offset;
   11300 	int rv;
   11301 
   11302 	/* Acquire semaphore */
   11303 	if (sc->phy.acquire(sc)) {
   11304 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11305 		return -1;
   11306 	}
   11307 
   11308 	/* Page select */
   11309 	page = reg >> GS40G_PAGE_SHIFT;
   11310 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11311 	if (rv != 0)
   11312 		goto release;
   11313 
   11314 	/* Read reg */
   11315 	offset = reg & GS40G_OFFSET_MASK;
   11316 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11317 
   11318 release:
   11319 	sc->phy.release(sc);
   11320 	return rv;
   11321 }
   11322 
   11323 /*
   11324  * wm_gmii_gs40g_writereg:	[mii interface function]
   11325  *
   11326  *	Write a PHY register on the I210 and I211.
   11327  * This could be handled by the PHY layer if we didn't have to lock the
    11328  * resource ...
   11329  */
   11330 static int
   11331 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11332 {
   11333 	struct wm_softc *sc = device_private(dev);
   11334 	uint16_t page;
   11335 	int offset, rv;
   11336 
   11337 	/* Acquire semaphore */
   11338 	if (sc->phy.acquire(sc)) {
   11339 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11340 		return -1;
   11341 	}
   11342 
   11343 	/* Page select */
   11344 	page = reg >> GS40G_PAGE_SHIFT;
   11345 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11346 	if (rv != 0)
   11347 		goto release;
   11348 
   11349 	/* Write reg */
   11350 	offset = reg & GS40G_OFFSET_MASK;
   11351 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11352 
   11353 release:
   11354 	/* Release semaphore */
   11355 	sc->phy.release(sc);
   11356 	return rv;
   11357 }
   11358 
   11359 /*
   11360  * wm_gmii_statchg:	[mii interface function]
   11361  *
   11362  *	Callback from MII layer when media changes.
   11363  */
   11364 static void
   11365 wm_gmii_statchg(struct ifnet *ifp)
   11366 {
   11367 	struct wm_softc *sc = ifp->if_softc;
   11368 	struct mii_data *mii = &sc->sc_mii;
   11369 
   11370 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11371 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11372 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11373 
   11374 	/*
   11375 	 * Get flow control negotiation result.
   11376 	 */
   11377 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11378 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11379 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11380 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11381 	}
   11382 
   11383 	if (sc->sc_flowflags & IFM_FLOW) {
   11384 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11385 			sc->sc_ctrl |= CTRL_TFCE;
   11386 			sc->sc_fcrtl |= FCRTL_XONE;
   11387 		}
   11388 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11389 			sc->sc_ctrl |= CTRL_RFCE;
   11390 	}
   11391 
   11392 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11393 		DPRINTF(WM_DEBUG_LINK,
   11394 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11395 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11396 	} else {
   11397 		DPRINTF(WM_DEBUG_LINK,
   11398 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11399 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11400 	}
   11401 
   11402 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11403 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11404 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11405 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11406 	if (sc->sc_type == WM_T_80003) {
   11407 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11408 		case IFM_1000_T:
   11409 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11410 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11411 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11412 			break;
   11413 		default:
   11414 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11415 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11416 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11417 			break;
   11418 		}
   11419 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11420 	}
   11421 }
   11422 
   11423 /* kumeran related (80003, ICH* and PCH*) */
   11424 
   11425 /*
   11426  * wm_kmrn_readreg:
   11427  *
   11428  *	Read a kumeran register
   11429  */
   11430 static int
   11431 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11432 {
   11433 	int rv;
   11434 
   11435 	if (sc->sc_type == WM_T_80003)
   11436 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11437 	else
   11438 		rv = sc->phy.acquire(sc);
   11439 	if (rv != 0) {
   11440 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11441 		    __func__);
   11442 		return rv;
   11443 	}
   11444 
   11445 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11446 
   11447 	if (sc->sc_type == WM_T_80003)
   11448 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11449 	else
   11450 		sc->phy.release(sc);
   11451 
   11452 	return rv;
   11453 }
   11454 
   11455 static int
   11456 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11457 {
   11458 
   11459 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11460 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11461 	    KUMCTRLSTA_REN);
   11462 	CSR_WRITE_FLUSH(sc);
   11463 	delay(2);
   11464 
   11465 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11466 
   11467 	return 0;
   11468 }
   11469 
   11470 /*
   11471  * wm_kmrn_writereg:
   11472  *
   11473  *	Write a kumeran register
   11474  */
   11475 static int
   11476 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11477 {
   11478 	int rv;
   11479 
   11480 	if (sc->sc_type == WM_T_80003)
   11481 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11482 	else
   11483 		rv = sc->phy.acquire(sc);
   11484 	if (rv != 0) {
   11485 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11486 		    __func__);
   11487 		return rv;
   11488 	}
   11489 
   11490 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11491 
   11492 	if (sc->sc_type == WM_T_80003)
   11493 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11494 	else
   11495 		sc->phy.release(sc);
   11496 
   11497 	return rv;
   11498 }
   11499 
   11500 static int
   11501 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11502 {
   11503 
   11504 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11505 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11506 
   11507 	return 0;
   11508 }
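/*
 * Illustrative sketch (not part of the driver): a read-modify-write of a
 * Kumeran register built from the locked accessors above.  As the _locked
 * suffix implies, the caller is assumed to already hold the MAC-CSR/PHY
 * semaphore; "example_kmrn_set_bits" is a hypothetical name.
 */
#if 0
static int
example_kmrn_set_bits(struct wm_softc *sc, int reg, uint16_t bits)
{
	uint16_t val;
	int rv;

	rv = wm_kmrn_readreg_locked(sc, reg, &val);
	if (rv != 0)
		return rv;
	return wm_kmrn_writereg_locked(sc, reg, val | bits);
}
#endif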
   11509 
   11510 /*
   11511  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11512  * This access method is different from IEEE MMD.
   11513  */
   11514 static int
   11515 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11516 {
   11517 	struct wm_softc *sc = device_private(dev);
   11518 	int rv;
   11519 
   11520 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11521 	if (rv != 0)
   11522 		return rv;
   11523 
   11524 	if (rd)
   11525 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11526 	else
   11527 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11528 	return rv;
   11529 }
   11530 
   11531 static int
   11532 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11533 {
   11534 
   11535 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11536 }
   11537 
   11538 static int
   11539 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11540 {
   11541 
   11542 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11543 }
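/*
 * Illustrative sketch (not part of the driver): EMI registers are reached
 * indirectly, by writing the EMI address to I82579_EMI_ADDR and then moving
 * data through I82579_EMI_DATA, all under the PHY semaphore.
 * "example_read_emi" is a hypothetical name.
 */
#if 0
static int
example_read_emi(struct wm_softc *sc, device_t dev, int reg, uint16_t *val)
{
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	rv = wm_read_emi_reg_locked(dev, reg, val);
	sc->phy.release(sc);
	return rv;
}
#endif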
   11544 
   11545 /* SGMII related */
   11546 
   11547 /*
   11548  * wm_sgmii_uses_mdio
   11549  *
   11550  * Check whether the transaction is to the internal PHY or the external
   11551  * MDIO interface. Return true if it's MDIO.
   11552  */
   11553 static bool
   11554 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11555 {
   11556 	uint32_t reg;
   11557 	bool ismdio = false;
   11558 
   11559 	switch (sc->sc_type) {
   11560 	case WM_T_82575:
   11561 	case WM_T_82576:
   11562 		reg = CSR_READ(sc, WMREG_MDIC);
   11563 		ismdio = ((reg & MDIC_DEST) != 0);
   11564 		break;
   11565 	case WM_T_82580:
   11566 	case WM_T_I350:
   11567 	case WM_T_I354:
   11568 	case WM_T_I210:
   11569 	case WM_T_I211:
   11570 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11571 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11572 		break;
   11573 	default:
   11574 		break;
   11575 	}
   11576 
   11577 	return ismdio;
   11578 }
   11579 
   11580 /*
   11581  * wm_sgmii_readreg:	[mii interface function]
   11582  *
    11583  *	Read a PHY register on the SGMII.
    11584  * This could be handled by the PHY layer if we didn't have to lock the
    11585  * resource ...
   11586  */
   11587 static int
   11588 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11589 {
   11590 	struct wm_softc *sc = device_private(dev);
   11591 	int rv;
   11592 
   11593 	if (sc->phy.acquire(sc)) {
   11594 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11595 		return -1;
   11596 	}
   11597 
   11598 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11599 
   11600 	sc->phy.release(sc);
   11601 	return rv;
   11602 }
   11603 
   11604 static int
   11605 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11606 {
   11607 	struct wm_softc *sc = device_private(dev);
   11608 	uint32_t i2ccmd;
    11609 	int i, rv = 0;
   11610 
   11611 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11612 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11613 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11614 
   11615 	/* Poll the ready bit */
   11616 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11617 		delay(50);
   11618 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11619 		if (i2ccmd & I2CCMD_READY)
   11620 			break;
   11621 	}
   11622 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11623 		device_printf(dev, "I2CCMD Read did not complete\n");
   11624 		rv = ETIMEDOUT;
   11625 	}
   11626 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11627 		device_printf(dev, "I2CCMD Error bit set\n");
   11628 		rv = EIO;
   11629 	}
   11630 
   11631 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11632 
   11633 	return rv;
   11634 }
   11635 
   11636 /*
   11637  * wm_sgmii_writereg:	[mii interface function]
   11638  *
   11639  *	Write a PHY register on the SGMII.
   11640  * This could be handled by the PHY layer if we didn't have to lock the
    11641  * resource ...
   11642  */
   11643 static int
   11644 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11645 {
   11646 	struct wm_softc *sc = device_private(dev);
   11647 	int rv;
   11648 
   11649 	if (sc->phy.acquire(sc) != 0) {
   11650 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11651 		return -1;
   11652 	}
   11653 
   11654 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11655 
   11656 	sc->phy.release(sc);
   11657 
   11658 	return rv;
   11659 }
   11660 
   11661 static int
   11662 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11663 {
   11664 	struct wm_softc *sc = device_private(dev);
   11665 	uint32_t i2ccmd;
   11666 	uint16_t swapdata;
   11667 	int rv = 0;
   11668 	int i;
   11669 
   11670 	/* Swap the data bytes for the I2C interface */
   11671 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11672 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11673 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11674 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11675 
   11676 	/* Poll the ready bit */
   11677 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11678 		delay(50);
   11679 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11680 		if (i2ccmd & I2CCMD_READY)
   11681 			break;
   11682 	}
   11683 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11684 		device_printf(dev, "I2CCMD Write did not complete\n");
   11685 		rv = ETIMEDOUT;
   11686 	}
   11687 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11688 		device_printf(dev, "I2CCMD Error bit set\n");
   11689 		rv = EIO;
   11690 	}
   11691 
   11692 	return rv;
   11693 }
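/*
 * Note on the byte swaps above: the I2CCMD data field carries the 16-bit
 * PHY register value MSB-first, so host and wire order differ by a byte
 * swap.  A worked example (illustrative only): a host value of 0x1234 goes
 * out as 0x3412, and the same swap recovers 0x1234 on a read.
 */
#if 0
	uint16_t host = 0x1234;
	uint16_t wire = ((host >> 8) & 0x00ff) | ((host << 8) & 0xff00);
	/* wire == 0x3412 */
#endif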
   11694 
   11695 /* TBI related */
   11696 
   11697 static bool
   11698 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11699 {
   11700 	bool sig;
   11701 
   11702 	sig = ctrl & CTRL_SWDPIN(1);
   11703 
   11704 	/*
   11705 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11706 	 * detect a signal, 1 if they don't.
   11707 	 */
   11708 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11709 		sig = !sig;
   11710 
   11711 	return sig;
   11712 }
   11713 
   11714 /*
   11715  * wm_tbi_mediainit:
   11716  *
   11717  *	Initialize media for use on 1000BASE-X devices.
   11718  */
   11719 static void
   11720 wm_tbi_mediainit(struct wm_softc *sc)
   11721 {
   11722 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11723 	const char *sep = "";
   11724 
   11725 	if (sc->sc_type < WM_T_82543)
   11726 		sc->sc_tipg = TIPG_WM_DFLT;
   11727 	else
   11728 		sc->sc_tipg = TIPG_LG_DFLT;
   11729 
   11730 	sc->sc_tbi_serdes_anegticks = 5;
   11731 
   11732 	/* Initialize our media structures */
   11733 	sc->sc_mii.mii_ifp = ifp;
   11734 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11735 
   11736 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11737 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11738 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11739 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11740 	else
   11741 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11742 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11743 
   11744 	/*
   11745 	 * SWD Pins:
   11746 	 *
   11747 	 *	0 = Link LED (output)
   11748 	 *	1 = Loss Of Signal (input)
   11749 	 */
   11750 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11751 
   11752 	/* XXX Perhaps this is only for TBI */
   11753 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11754 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11755 
   11756 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11757 		sc->sc_ctrl &= ~CTRL_LRST;
   11758 
   11759 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11760 
   11761 #define	ADD(ss, mm, dd)							\
   11762 do {									\
   11763 	aprint_normal("%s%s", sep, ss);					\
   11764 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11765 	sep = ", ";							\
   11766 } while (/*CONSTCOND*/0)
   11767 
   11768 	aprint_normal_dev(sc->sc_dev, "");
   11769 
   11770 	if (sc->sc_type == WM_T_I354) {
   11771 		uint32_t status;
   11772 
   11773 		status = CSR_READ(sc, WMREG_STATUS);
   11774 		if (((status & STATUS_2P5_SKU) != 0)
   11775 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11776 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11777 		} else
   11778 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11779 	} else if (sc->sc_type == WM_T_82545) {
   11780 		/* Only 82545 is LX (XXX except SFP) */
   11781 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11782 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11783 	} else {
   11784 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11785 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11786 	}
   11787 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11788 	aprint_normal("\n");
   11789 
   11790 #undef ADD
   11791 
   11792 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11793 }
   11794 
   11795 /*
   11796  * wm_tbi_mediachange:	[ifmedia interface function]
   11797  *
   11798  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11799  */
   11800 static int
   11801 wm_tbi_mediachange(struct ifnet *ifp)
   11802 {
   11803 	struct wm_softc *sc = ifp->if_softc;
   11804 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11805 	uint32_t status, ctrl;
   11806 	bool signal;
   11807 	int i;
   11808 
   11809 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11810 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11811 		/* XXX need some work for >= 82571 and < 82575 */
   11812 		if (sc->sc_type < WM_T_82575)
   11813 			return 0;
   11814 	}
   11815 
   11816 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11817 	    || (sc->sc_type >= WM_T_82575))
   11818 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11819 
   11820 	sc->sc_ctrl &= ~CTRL_LRST;
   11821 	sc->sc_txcw = TXCW_ANE;
   11822 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11823 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11824 	else if (ife->ifm_media & IFM_FDX)
   11825 		sc->sc_txcw |= TXCW_FD;
   11826 	else
   11827 		sc->sc_txcw |= TXCW_HD;
   11828 
   11829 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11830 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11831 
   11832 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11833 		device_xname(sc->sc_dev), sc->sc_txcw));
   11834 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11835 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11836 	CSR_WRITE_FLUSH(sc);
   11837 	delay(1000);
   11838 
    11839 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11840 	signal = wm_tbi_havesignal(sc, ctrl);
   11841 
   11842 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11843 		signal));
   11844 
   11845 	if (signal) {
   11846 		/* Have signal; wait for the link to come up. */
   11847 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11848 			delay(10000);
   11849 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11850 				break;
   11851 		}
   11852 
   11853 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11854 			device_xname(sc->sc_dev),i));
   11855 
   11856 		status = CSR_READ(sc, WMREG_STATUS);
   11857 		DPRINTF(WM_DEBUG_LINK,
   11858 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11859 			device_xname(sc->sc_dev),status, STATUS_LU));
   11860 		if (status & STATUS_LU) {
   11861 			/* Link is up. */
   11862 			DPRINTF(WM_DEBUG_LINK,
   11863 			    ("%s: LINK: set media -> link up %s\n",
   11864 				device_xname(sc->sc_dev),
   11865 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11866 
   11867 			/*
    11868 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11869 			 * automatically, so refresh sc->sc_ctrl from the register.
   11870 			 */
   11871 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11872 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11873 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11874 			if (status & STATUS_FD)
   11875 				sc->sc_tctl |=
   11876 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11877 			else
   11878 				sc->sc_tctl |=
   11879 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11880 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11881 				sc->sc_fcrtl |= FCRTL_XONE;
   11882 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11883 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11884 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11885 			sc->sc_tbi_linkup = 1;
   11886 		} else {
   11887 			if (i == WM_LINKUP_TIMEOUT)
   11888 				wm_check_for_link(sc);
   11889 			/* Link is down. */
   11890 			DPRINTF(WM_DEBUG_LINK,
   11891 			    ("%s: LINK: set media -> link down\n",
   11892 				device_xname(sc->sc_dev)));
   11893 			sc->sc_tbi_linkup = 0;
   11894 		}
   11895 	} else {
   11896 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11897 			device_xname(sc->sc_dev)));
   11898 		sc->sc_tbi_linkup = 0;
   11899 	}
   11900 
   11901 	wm_tbi_serdes_set_linkled(sc);
   11902 
   11903 	return 0;
   11904 }
   11905 
   11906 /*
   11907  * wm_tbi_mediastatus:	[ifmedia interface function]
   11908  *
   11909  *	Get the current interface media status on a 1000BASE-X device.
   11910  */
   11911 static void
   11912 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11913 {
   11914 	struct wm_softc *sc = ifp->if_softc;
   11915 	uint32_t ctrl, status;
   11916 
   11917 	ifmr->ifm_status = IFM_AVALID;
   11918 	ifmr->ifm_active = IFM_ETHER;
   11919 
   11920 	status = CSR_READ(sc, WMREG_STATUS);
   11921 	if ((status & STATUS_LU) == 0) {
   11922 		ifmr->ifm_active |= IFM_NONE;
   11923 		return;
   11924 	}
   11925 
   11926 	ifmr->ifm_status |= IFM_ACTIVE;
   11927 	/* Only 82545 is LX */
   11928 	if (sc->sc_type == WM_T_82545)
   11929 		ifmr->ifm_active |= IFM_1000_LX;
   11930 	else
   11931 		ifmr->ifm_active |= IFM_1000_SX;
   11932 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11933 		ifmr->ifm_active |= IFM_FDX;
   11934 	else
   11935 		ifmr->ifm_active |= IFM_HDX;
   11936 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11937 	if (ctrl & CTRL_RFCE)
   11938 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11939 	if (ctrl & CTRL_TFCE)
   11940 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11941 }
   11942 
   11943 /* XXX TBI only */
   11944 static int
   11945 wm_check_for_link(struct wm_softc *sc)
   11946 {
   11947 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11948 	uint32_t rxcw;
   11949 	uint32_t ctrl;
   11950 	uint32_t status;
   11951 	bool signal;
   11952 
   11953 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11954 		device_xname(sc->sc_dev), __func__));
   11955 
   11956 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11957 		/* XXX need some work for >= 82571 */
   11958 		if (sc->sc_type >= WM_T_82571) {
   11959 			sc->sc_tbi_linkup = 1;
   11960 			return 0;
   11961 		}
   11962 	}
   11963 
   11964 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11965 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11966 	status = CSR_READ(sc, WMREG_STATUS);
   11967 	signal = wm_tbi_havesignal(sc, ctrl);
   11968 
   11969 	DPRINTF(WM_DEBUG_LINK,
   11970 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11971 		device_xname(sc->sc_dev), __func__, signal,
   11972 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11973 
   11974 	/*
   11975 	 * SWDPIN   LU RXCW
   11976 	 *	0    0	  0
   11977 	 *	0    0	  1	(should not happen)
   11978 	 *	0    1	  0	(should not happen)
   11979 	 *	0    1	  1	(should not happen)
   11980 	 *	1    0	  0	Disable autonego and force linkup
   11981 	 *	1    0	  1	got /C/ but not linkup yet
   11982 	 *	1    1	  0	(linkup)
   11983 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11984 	 *
   11985 	 */
   11986 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11987 		DPRINTF(WM_DEBUG_LINK,
   11988 		    ("%s: %s: force linkup and fullduplex\n",
   11989 			device_xname(sc->sc_dev), __func__));
   11990 		sc->sc_tbi_linkup = 0;
   11991 		/* Disable auto-negotiation in the TXCW register */
   11992 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11993 
   11994 		/*
   11995 		 * Force link-up and also force full-duplex.
   11996 		 *
    11997 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    11998 		 * automatically, so keep sc->sc_ctrl in sync with the register.
   11999 		 */
   12000 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12001 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12002 	} else if (((status & STATUS_LU) != 0)
   12003 	    && ((rxcw & RXCW_C) != 0)
   12004 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12005 		sc->sc_tbi_linkup = 1;
   12006 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12007 			device_xname(sc->sc_dev),
   12008 			__func__));
   12009 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12010 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12011 	} else if (signal && ((rxcw & RXCW_C) != 0))
   12012 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12013 			device_xname(sc->sc_dev), __func__));
   12014 	else
   12015 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12016 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12017 			status));
   12018 
   12019 	return 0;
   12020 }
   12021 
   12022 /*
   12023  * wm_tbi_tick:
   12024  *
   12025  *	Check the link on TBI devices.
   12026  *	This function acts as mii_tick().
   12027  */
   12028 static void
   12029 wm_tbi_tick(struct wm_softc *sc)
   12030 {
   12031 	struct mii_data *mii = &sc->sc_mii;
   12032 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12033 	uint32_t status;
   12034 
   12035 	KASSERT(WM_CORE_LOCKED(sc));
   12036 
   12037 	status = CSR_READ(sc, WMREG_STATUS);
   12038 
   12039 	/* XXX is this needed? */
   12040 	(void)CSR_READ(sc, WMREG_RXCW);
   12041 	(void)CSR_READ(sc, WMREG_CTRL);
   12042 
   12043 	/* set link status */
   12044 	if ((status & STATUS_LU) == 0) {
   12045 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12046 			device_xname(sc->sc_dev)));
   12047 		sc->sc_tbi_linkup = 0;
   12048 	} else if (sc->sc_tbi_linkup == 0) {
   12049 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12050 			device_xname(sc->sc_dev),
   12051 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12052 		sc->sc_tbi_linkup = 1;
   12053 		sc->sc_tbi_serdes_ticks = 0;
   12054 	}
   12055 
   12056 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12057 		goto setled;
   12058 
   12059 	if ((status & STATUS_LU) == 0) {
   12060 		sc->sc_tbi_linkup = 0;
   12061 		/* If the timer expired, retry autonegotiation */
   12062 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12063 		    && (++sc->sc_tbi_serdes_ticks
   12064 			>= sc->sc_tbi_serdes_anegticks)) {
   12065 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12066 			sc->sc_tbi_serdes_ticks = 0;
   12067 			/*
   12068 			 * Reset the link, and let autonegotiation do
   12069 			 * its thing
   12070 			 */
   12071 			sc->sc_ctrl |= CTRL_LRST;
   12072 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12073 			CSR_WRITE_FLUSH(sc);
   12074 			delay(1000);
   12075 			sc->sc_ctrl &= ~CTRL_LRST;
   12076 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12077 			CSR_WRITE_FLUSH(sc);
   12078 			delay(1000);
   12079 			CSR_WRITE(sc, WMREG_TXCW,
   12080 			    sc->sc_txcw & ~TXCW_ANE);
   12081 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12082 		}
   12083 	}
   12084 
   12085 setled:
   12086 	wm_tbi_serdes_set_linkled(sc);
   12087 }
   12088 
   12089 /* SERDES related */
   12090 static void
   12091 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12092 {
   12093 	uint32_t reg;
   12094 
   12095 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12096 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12097 		return;
   12098 
   12099 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12100 	reg |= PCS_CFG_PCS_EN;
   12101 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12102 
   12103 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12104 	reg &= ~CTRL_EXT_SWDPIN(3);
   12105 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12106 	CSR_WRITE_FLUSH(sc);
   12107 }
   12108 
   12109 static int
   12110 wm_serdes_mediachange(struct ifnet *ifp)
   12111 {
   12112 	struct wm_softc *sc = ifp->if_softc;
   12113 	bool pcs_autoneg = true; /* XXX */
   12114 	uint32_t ctrl_ext, pcs_lctl, reg;
   12115 
   12116 	/* XXX Currently, this function is not called on 8257[12] */
   12117 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12118 	    || (sc->sc_type >= WM_T_82575))
   12119 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12120 
   12121 	wm_serdes_power_up_link_82575(sc);
   12122 
   12123 	sc->sc_ctrl |= CTRL_SLU;
   12124 
   12125 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12126 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12127 
   12128 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12129 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12130 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12131 	case CTRL_EXT_LINK_MODE_SGMII:
   12132 		pcs_autoneg = true;
   12133 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12134 		break;
   12135 	case CTRL_EXT_LINK_MODE_1000KX:
   12136 		pcs_autoneg = false;
   12137 		/* FALLTHROUGH */
   12138 	default:
   12139 		if ((sc->sc_type == WM_T_82575)
   12140 		    || (sc->sc_type == WM_T_82576)) {
   12141 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12142 				pcs_autoneg = false;
   12143 		}
   12144 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12145 		    | CTRL_FRCFDX;
   12146 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12147 	}
   12148 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12149 
   12150 	if (pcs_autoneg) {
   12151 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12152 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12153 
   12154 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12155 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12156 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12157 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12158 	} else
   12159 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12160 
   12161 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    12162 
   12164 	return 0;
   12165 }
   12166 
   12167 static void
   12168 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12169 {
   12170 	struct wm_softc *sc = ifp->if_softc;
   12171 	struct mii_data *mii = &sc->sc_mii;
   12172 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12173 	uint32_t pcs_adv, pcs_lpab, reg;
   12174 
   12175 	ifmr->ifm_status = IFM_AVALID;
   12176 	ifmr->ifm_active = IFM_ETHER;
   12177 
   12178 	/* Check PCS */
   12179 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12180 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12181 		ifmr->ifm_active |= IFM_NONE;
   12182 		sc->sc_tbi_linkup = 0;
   12183 		goto setled;
   12184 	}
   12185 
   12186 	sc->sc_tbi_linkup = 1;
   12187 	ifmr->ifm_status |= IFM_ACTIVE;
   12188 	if (sc->sc_type == WM_T_I354) {
   12189 		uint32_t status;
   12190 
   12191 		status = CSR_READ(sc, WMREG_STATUS);
   12192 		if (((status & STATUS_2P5_SKU) != 0)
   12193 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12194 			ifmr->ifm_active |= IFM_2500_KX;
   12195 		} else
   12196 			ifmr->ifm_active |= IFM_1000_KX;
   12197 	} else {
   12198 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12199 		case PCS_LSTS_SPEED_10:
   12200 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12201 			break;
   12202 		case PCS_LSTS_SPEED_100:
   12203 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12204 			break;
   12205 		case PCS_LSTS_SPEED_1000:
   12206 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12207 			break;
   12208 		default:
   12209 			device_printf(sc->sc_dev, "Unknown speed\n");
   12210 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12211 			break;
   12212 		}
   12213 	}
   12214 	if ((reg & PCS_LSTS_FDX) != 0)
   12215 		ifmr->ifm_active |= IFM_FDX;
   12216 	else
   12217 		ifmr->ifm_active |= IFM_HDX;
   12218 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12219 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12220 		/* Check flow */
   12221 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12222 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12223 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12224 			goto setled;
   12225 		}
   12226 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12227 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12228 		DPRINTF(WM_DEBUG_LINK,
   12229 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12230 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12231 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12232 			mii->mii_media_active |= IFM_FLOW
   12233 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12234 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12235 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12236 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12237 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12238 			mii->mii_media_active |= IFM_FLOW
   12239 			    | IFM_ETH_TXPAUSE;
   12240 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12241 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12242 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12243 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12244 			mii->mii_media_active |= IFM_FLOW
   12245 			    | IFM_ETH_RXPAUSE;
   12246 		}
   12247 	}
   12248 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12249 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12250 setled:
   12251 	wm_tbi_serdes_set_linkled(sc);
   12252 }
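/*
 * The flow-control resolution above follows IEEE 802.3 Annex 28B: with the
 * local advertisement (pcs_adv) and link partner ability (pcs_lpab),
 *	both sides Sym				-> TX and RX pause
 *	local Asym only, partner Sym+Asym	-> TX pause only
 *	local Sym+Asym, partner Asym only	-> RX pause only
 * and any other combination resolves to no pause.
 */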
   12253 
   12254 /*
   12255  * wm_serdes_tick:
   12256  *
   12257  *	Check the link on serdes devices.
   12258  */
   12259 static void
   12260 wm_serdes_tick(struct wm_softc *sc)
   12261 {
   12262 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12263 	struct mii_data *mii = &sc->sc_mii;
   12264 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12265 	uint32_t reg;
   12266 
   12267 	KASSERT(WM_CORE_LOCKED(sc));
   12268 
   12269 	mii->mii_media_status = IFM_AVALID;
   12270 	mii->mii_media_active = IFM_ETHER;
   12271 
   12272 	/* Check PCS */
   12273 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12274 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12275 		mii->mii_media_status |= IFM_ACTIVE;
   12276 		sc->sc_tbi_linkup = 1;
   12277 		sc->sc_tbi_serdes_ticks = 0;
   12278 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12279 		if ((reg & PCS_LSTS_FDX) != 0)
   12280 			mii->mii_media_active |= IFM_FDX;
   12281 		else
   12282 			mii->mii_media_active |= IFM_HDX;
   12283 	} else {
   12284 		mii->mii_media_status |= IFM_NONE;
   12285 		sc->sc_tbi_linkup = 0;
   12286 		/* If the timer expired, retry autonegotiation */
   12287 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12288 		    && (++sc->sc_tbi_serdes_ticks
   12289 			>= sc->sc_tbi_serdes_anegticks)) {
   12290 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12291 			sc->sc_tbi_serdes_ticks = 0;
   12292 			/* XXX */
   12293 			wm_serdes_mediachange(ifp);
   12294 		}
   12295 	}
   12296 
   12297 	wm_tbi_serdes_set_linkled(sc);
   12298 }
   12299 
   12300 /* SFP related */
   12301 
   12302 static int
   12303 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12304 {
   12305 	uint32_t i2ccmd;
   12306 	int i;
   12307 
   12308 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12309 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12310 
   12311 	/* Poll the ready bit */
   12312 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12313 		delay(50);
   12314 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12315 		if (i2ccmd & I2CCMD_READY)
   12316 			break;
   12317 	}
   12318 	if ((i2ccmd & I2CCMD_READY) == 0)
   12319 		return -1;
   12320 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12321 		return -1;
   12322 
   12323 	*data = i2ccmd & 0x00ff;
   12324 
   12325 	return 0;
   12326 }
   12327 
   12328 static uint32_t
   12329 wm_sfp_get_media_type(struct wm_softc *sc)
   12330 {
   12331 	uint32_t ctrl_ext;
   12332 	uint8_t val = 0;
   12333 	int timeout = 3;
   12334 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12335 	int rv = -1;
   12336 
   12337 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12338 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12339 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12340 	CSR_WRITE_FLUSH(sc);
   12341 
   12342 	/* Read SFP module data */
   12343 	while (timeout) {
   12344 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12345 		if (rv == 0)
   12346 			break;
   12347 		delay(100*1000); /* XXX too big */
   12348 		timeout--;
   12349 	}
   12350 	if (rv != 0)
   12351 		goto out;
   12352 	switch (val) {
   12353 	case SFF_SFP_ID_SFF:
   12354 		aprint_normal_dev(sc->sc_dev,
   12355 		    "Module/Connector soldered to board\n");
   12356 		break;
   12357 	case SFF_SFP_ID_SFP:
   12358 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12359 		break;
   12360 	case SFF_SFP_ID_UNKNOWN:
   12361 		goto out;
   12362 	default:
   12363 		break;
   12364 	}
   12365 
   12366 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12367 	if (rv != 0) {
   12368 		goto out;
   12369 	}
   12370 
   12371 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12372 		mediatype = WM_MEDIATYPE_SERDES;
   12373 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12374 		sc->sc_flags |= WM_F_SGMII;
   12375 		mediatype = WM_MEDIATYPE_COPPER;
   12376 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12377 		sc->sc_flags |= WM_F_SGMII;
   12378 		mediatype = WM_MEDIATYPE_SERDES;
   12379 	}
   12380 
   12381 out:
   12382 	/* Restore I2C interface setting */
   12383 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12384 
   12385 	return mediatype;
   12386 }
   12387 
   12388 /*
   12389  * NVM related.
   12390  * Microwire, SPI (w/wo EERD) and Flash.
   12391  */
   12392 
   12393 /* Both spi and uwire */
   12394 
   12395 /*
   12396  * wm_eeprom_sendbits:
   12397  *
   12398  *	Send a series of bits to the EEPROM.
   12399  */
   12400 static void
   12401 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12402 {
   12403 	uint32_t reg;
   12404 	int x;
   12405 
   12406 	reg = CSR_READ(sc, WMREG_EECD);
   12407 
   12408 	for (x = nbits; x > 0; x--) {
   12409 		if (bits & (1U << (x - 1)))
   12410 			reg |= EECD_DI;
   12411 		else
   12412 			reg &= ~EECD_DI;
   12413 		CSR_WRITE(sc, WMREG_EECD, reg);
   12414 		CSR_WRITE_FLUSH(sc);
   12415 		delay(2);
   12416 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12417 		CSR_WRITE_FLUSH(sc);
   12418 		delay(2);
   12419 		CSR_WRITE(sc, WMREG_EECD, reg);
   12420 		CSR_WRITE_FLUSH(sc);
   12421 		delay(2);
   12422 	}
   12423 }
   12424 
   12425 /*
   12426  * wm_eeprom_recvbits:
   12427  *
   12428  *	Receive a series of bits from the EEPROM.
   12429  */
   12430 static void
   12431 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12432 {
   12433 	uint32_t reg, val;
   12434 	int x;
   12435 
   12436 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12437 
   12438 	val = 0;
   12439 	for (x = nbits; x > 0; x--) {
   12440 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12441 		CSR_WRITE_FLUSH(sc);
   12442 		delay(2);
   12443 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12444 			val |= (1U << (x - 1));
   12445 		CSR_WRITE(sc, WMREG_EECD, reg);
   12446 		CSR_WRITE_FLUSH(sc);
   12447 		delay(2);
   12448 	}
   12449 	*valp = val;
   12450 }
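/*
 * With the two bit-banging helpers above, a complete Microwire READ
 * transaction is: assert chip select, shift out the 3-bit READ opcode and
 * the address on DI, then clock 16 data bits back in on DO, as
 * wm_nvm_read_uwire() below demonstrates.
 */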
   12451 
   12452 /* Microwire */
   12453 
   12454 /*
   12455  * wm_nvm_read_uwire:
   12456  *
   12457  *	Read a word from the EEPROM using the MicroWire protocol.
   12458  */
   12459 static int
   12460 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12461 {
   12462 	uint32_t reg, val;
   12463 	int i;
   12464 
   12465 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12466 		device_xname(sc->sc_dev), __func__));
   12467 
   12468 	if (sc->nvm.acquire(sc) != 0)
   12469 		return -1;
   12470 
   12471 	for (i = 0; i < wordcnt; i++) {
   12472 		/* Clear SK and DI. */
   12473 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12474 		CSR_WRITE(sc, WMREG_EECD, reg);
   12475 
   12476 		/*
   12477 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12478 		 * and Xen.
   12479 		 *
   12480 		 * We use this workaround only for 82540 because qemu's
   12481 		 * e1000 act as 82540.
   12482 		 */
   12483 		if (sc->sc_type == WM_T_82540) {
   12484 			reg |= EECD_SK;
   12485 			CSR_WRITE(sc, WMREG_EECD, reg);
   12486 			reg &= ~EECD_SK;
   12487 			CSR_WRITE(sc, WMREG_EECD, reg);
   12488 			CSR_WRITE_FLUSH(sc);
   12489 			delay(2);
   12490 		}
   12491 		/* XXX: end of workaround */
   12492 
   12493 		/* Set CHIP SELECT. */
   12494 		reg |= EECD_CS;
   12495 		CSR_WRITE(sc, WMREG_EECD, reg);
   12496 		CSR_WRITE_FLUSH(sc);
   12497 		delay(2);
   12498 
   12499 		/* Shift in the READ command. */
   12500 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12501 
   12502 		/* Shift in address. */
   12503 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12504 
   12505 		/* Shift out the data. */
   12506 		wm_eeprom_recvbits(sc, &val, 16);
   12507 		data[i] = val & 0xffff;
   12508 
   12509 		/* Clear CHIP SELECT. */
   12510 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12511 		CSR_WRITE(sc, WMREG_EECD, reg);
   12512 		CSR_WRITE_FLUSH(sc);
   12513 		delay(2);
   12514 	}
   12515 
   12516 	sc->nvm.release(sc);
   12517 	return 0;
   12518 }
   12519 
   12520 /* SPI */
   12521 
   12522 /*
   12523  * Set SPI and FLASH related information from the EECD register.
   12524  * For 82541 and 82547, the word size is taken from EEPROM.
   12525  */
   12526 static int
   12527 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12528 {
   12529 	int size;
   12530 	uint32_t reg;
   12531 	uint16_t data;
   12532 
   12533 	reg = CSR_READ(sc, WMREG_EECD);
   12534 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12535 
   12536 	/* Read the size of NVM from EECD by default */
   12537 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12538 	switch (sc->sc_type) {
   12539 	case WM_T_82541:
   12540 	case WM_T_82541_2:
   12541 	case WM_T_82547:
   12542 	case WM_T_82547_2:
   12543 		/* Set dummy value to access EEPROM */
   12544 		sc->sc_nvm_wordsize = 64;
   12545 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12546 			aprint_error_dev(sc->sc_dev,
   12547 			    "%s: failed to read EEPROM size\n", __func__);
   12548 		}
   12549 		reg = data;
   12550 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12551 		if (size == 0)
   12552 			size = 6; /* 64 word size */
   12553 		else
   12554 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12555 		break;
   12556 	case WM_T_80003:
   12557 	case WM_T_82571:
   12558 	case WM_T_82572:
   12559 	case WM_T_82573: /* SPI case */
   12560 	case WM_T_82574: /* SPI case */
   12561 	case WM_T_82583: /* SPI case */
   12562 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12563 		if (size > 14)
   12564 			size = 14;
   12565 		break;
   12566 	case WM_T_82575:
   12567 	case WM_T_82576:
   12568 	case WM_T_82580:
   12569 	case WM_T_I350:
   12570 	case WM_T_I354:
   12571 	case WM_T_I210:
   12572 	case WM_T_I211:
   12573 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12574 		if (size > 15)
   12575 			size = 15;
   12576 		break;
   12577 	default:
   12578 		aprint_error_dev(sc->sc_dev,
   12579 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    12580 		return -1;
   12582 	}
   12583 
   12584 	sc->sc_nvm_wordsize = 1 << size;
   12585 
   12586 	return 0;
   12587 }
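/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, consistent with
 * the "64 word size" case above): an EECD_EE_SIZE_EX field of 4 on an
 * 82571 gives size = 4 + 6 = 10, so sc_nvm_wordsize = 1 << 10 = 1024
 * 16-bit words.
 */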
   12588 
   12589 /*
   12590  * wm_nvm_ready_spi:
   12591  *
   12592  *	Wait for a SPI EEPROM to be ready for commands.
   12593  */
   12594 static int
   12595 wm_nvm_ready_spi(struct wm_softc *sc)
   12596 {
   12597 	uint32_t val;
   12598 	int usec;
   12599 
   12600 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12601 		device_xname(sc->sc_dev), __func__));
   12602 
   12603 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12604 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12605 		wm_eeprom_recvbits(sc, &val, 8);
   12606 		if ((val & SPI_SR_RDY) == 0)
   12607 			break;
   12608 	}
   12609 	if (usec >= SPI_MAX_RETRIES) {
   12610 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12611 		return -1;
   12612 	}
   12613 	return 0;
   12614 }
   12615 
   12616 /*
   12617  * wm_nvm_read_spi:
   12618  *
    12619  *	Read a word from the EEPROM using the SPI protocol.
   12620  */
   12621 static int
   12622 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12623 {
   12624 	uint32_t reg, val;
   12625 	int i;
   12626 	uint8_t opc;
   12627 	int rv = 0;
   12628 
   12629 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12630 		device_xname(sc->sc_dev), __func__));
   12631 
   12632 	if (sc->nvm.acquire(sc) != 0)
   12633 		return -1;
   12634 
   12635 	/* Clear SK and CS. */
   12636 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12637 	CSR_WRITE(sc, WMREG_EECD, reg);
   12638 	CSR_WRITE_FLUSH(sc);
   12639 	delay(2);
   12640 
   12641 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12642 		goto out;
   12643 
   12644 	/* Toggle CS to flush commands. */
   12645 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12646 	CSR_WRITE_FLUSH(sc);
   12647 	delay(2);
   12648 	CSR_WRITE(sc, WMREG_EECD, reg);
   12649 	CSR_WRITE_FLUSH(sc);
   12650 	delay(2);
   12651 
   12652 	opc = SPI_OPC_READ;
   12653 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12654 		opc |= SPI_OPC_A8;
   12655 
   12656 	wm_eeprom_sendbits(sc, opc, 8);
   12657 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12658 
   12659 	for (i = 0; i < wordcnt; i++) {
   12660 		wm_eeprom_recvbits(sc, &val, 16);
   12661 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12662 	}
   12663 
   12664 	/* Raise CS and clear SK. */
   12665 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12666 	CSR_WRITE(sc, WMREG_EECD, reg);
   12667 	CSR_WRITE_FLUSH(sc);
   12668 	delay(2);
   12669 
   12670 out:
   12671 	sc->nvm.release(sc);
   12672 	return rv;
   12673 }
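/*
 * Note on SPI_OPC_A8 above: parts with 8-bit SPI addressing can only carry
 * eight address bits per transaction, so the ninth bit of the byte address
 * is folded into the opcode.  Since each word is two bytes, word offsets of
 * 128 and above (byte address >= 0x100) need the A8 opcode bit set.
 */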
   12674 
    12675 /* Reading via the EERD register */
   12676 
   12677 static int
   12678 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12679 {
   12680 	uint32_t attempts = 100000;
   12681 	uint32_t i, reg = 0;
   12682 	int32_t done = -1;
   12683 
   12684 	for (i = 0; i < attempts; i++) {
   12685 		reg = CSR_READ(sc, rw);
   12686 
   12687 		if (reg & EERD_DONE) {
   12688 			done = 0;
   12689 			break;
   12690 		}
   12691 		delay(5);
   12692 	}
   12693 
   12694 	return done;
   12695 }
   12696 
   12697 static int
   12698 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12699 {
   12700 	int i, eerd = 0;
   12701 	int rv = 0;
   12702 
   12703 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12704 		device_xname(sc->sc_dev), __func__));
   12705 
   12706 	if (sc->nvm.acquire(sc) != 0)
   12707 		return -1;
   12708 
   12709 	for (i = 0; i < wordcnt; i++) {
   12710 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12711 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12712 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12713 		if (rv != 0) {
   12714 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12715 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12716 			break;
   12717 		}
   12718 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12719 	}
   12720 
   12721 	sc->nvm.release(sc);
   12722 	return rv;
   12723 }
   12724 
   12725 /* Flash */
   12726 
   12727 static int
   12728 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12729 {
   12730 	uint32_t eecd;
   12731 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12732 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12733 	uint32_t nvm_dword = 0;
   12734 	uint8_t sig_byte = 0;
   12735 	int rv;
   12736 
   12737 	switch (sc->sc_type) {
   12738 	case WM_T_PCH_SPT:
   12739 	case WM_T_PCH_CNP:
   12740 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12741 		act_offset = ICH_NVM_SIG_WORD * 2;
   12742 
   12743 		/* set bank to 0 in case flash read fails. */
   12744 		*bank = 0;
   12745 
   12746 		/* Check bank 0 */
   12747 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12748 		if (rv != 0)
   12749 			return rv;
   12750 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12751 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12752 			*bank = 0;
   12753 			return 0;
   12754 		}
   12755 
   12756 		/* Check bank 1 */
   12757 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12758 		    &nvm_dword);
   12759 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12760 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12761 			*bank = 1;
   12762 			return 0;
   12763 		}
   12764 		aprint_error_dev(sc->sc_dev,
   12765 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12766 		return -1;
   12767 	case WM_T_ICH8:
   12768 	case WM_T_ICH9:
   12769 		eecd = CSR_READ(sc, WMREG_EECD);
   12770 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12771 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12772 			return 0;
   12773 		}
   12774 		/* FALLTHROUGH */
   12775 	default:
   12776 		/* Default to 0 */
   12777 		*bank = 0;
   12778 
   12779 		/* Check bank 0 */
   12780 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12781 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12782 			*bank = 0;
   12783 			return 0;
   12784 		}
   12785 
   12786 		/* Check bank 1 */
   12787 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12788 		    &sig_byte);
   12789 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12790 			*bank = 1;
   12791 			return 0;
   12792 		}
   12793 	}
   12794 
   12795 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12796 		device_xname(sc->sc_dev)));
   12797 	return -1;
   12798 }
   12799 
   12800 /******************************************************************************
   12801  * This function does initial flash setup so that a new read/write/erase cycle
   12802  * can be started.
   12803  *
   12804  * sc - The pointer to the hw structure
   12805  ****************************************************************************/
   12806 static int32_t
   12807 wm_ich8_cycle_init(struct wm_softc *sc)
   12808 {
   12809 	uint16_t hsfsts;
   12810 	int32_t error = 1;
   12811 	int32_t i     = 0;
   12812 
   12813 	if (sc->sc_type >= WM_T_PCH_SPT)
   12814 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12815 	else
   12816 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12817 
    12818 	/* Check the Flash Descriptor Valid bit in HW status */
   12819 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12820 		return error;
   12821 
    12822 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12824 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12825 
   12826 	if (sc->sc_type >= WM_T_PCH_SPT)
   12827 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12828 	else
   12829 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12830 
   12831 	/*
    12832 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12833 	 * check against in order to start a new cycle, or the FDONE bit
    12834 	 * should be set to 1 by a hardware reset so that it can be used to
    12835 	 * tell whether a cycle is in progress or has completed.  We should
    12836 	 * also have some software semaphore mechanism to guard FDONE or the
    12837 	 * cycle-in-progress bit so that two threads' accesses to those bits
    12838 	 * are serialized, or some way to keep two threads from starting a
    12839 	 * cycle at the same time.
   12840 	 */
   12841 
   12842 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12843 		/*
   12844 		 * There is no cycle running at present, so we can start a
   12845 		 * cycle
   12846 		 */
   12847 
   12848 		/* Begin by setting Flash Cycle Done. */
   12849 		hsfsts |= HSFSTS_DONE;
   12850 		if (sc->sc_type >= WM_T_PCH_SPT)
   12851 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12852 			    hsfsts & 0xffffUL);
   12853 		else
   12854 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12855 		error = 0;
   12856 	} else {
   12857 		/*
    12858 		 * Otherwise, poll for some time so the current cycle has a
   12859 		 * chance to end before giving up.
   12860 		 */
   12861 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12862 			if (sc->sc_type >= WM_T_PCH_SPT)
   12863 				hsfsts = ICH8_FLASH_READ32(sc,
   12864 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12865 			else
   12866 				hsfsts = ICH8_FLASH_READ16(sc,
   12867 				    ICH_FLASH_HSFSTS);
   12868 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12869 				error = 0;
   12870 				break;
   12871 			}
   12872 			delay(1);
   12873 		}
   12874 		if (error == 0) {
   12875 			/*
    12876 			 * The previous cycle completed within the timeout;
    12877 			 * now set the Flash Cycle Done bit.
   12878 			 */
   12879 			hsfsts |= HSFSTS_DONE;
   12880 			if (sc->sc_type >= WM_T_PCH_SPT)
   12881 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12882 				    hsfsts & 0xffffUL);
   12883 			else
   12884 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12885 				    hsfsts);
   12886 		}
   12887 	}
   12888 	return error;
   12889 }
   12890 
   12891 /******************************************************************************
   12892  * This function starts a flash cycle and waits for its completion
   12893  *
   12894  * sc - The pointer to the hw structure
   12895  ****************************************************************************/
   12896 static int32_t
   12897 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12898 {
   12899 	uint16_t hsflctl;
   12900 	uint16_t hsfsts;
   12901 	int32_t error = 1;
   12902 	uint32_t i = 0;
   12903 
   12904 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12905 	if (sc->sc_type >= WM_T_PCH_SPT)
   12906 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12907 	else
   12908 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12909 	hsflctl |= HSFCTL_GO;
   12910 	if (sc->sc_type >= WM_T_PCH_SPT)
   12911 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12912 		    (uint32_t)hsflctl << 16);
   12913 	else
   12914 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12915 
   12916 	/* Wait till FDONE bit is set to 1 */
   12917 	do {
   12918 		if (sc->sc_type >= WM_T_PCH_SPT)
   12919 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12920 			    & 0xffffUL;
   12921 		else
   12922 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12923 		if (hsfsts & HSFSTS_DONE)
   12924 			break;
   12925 		delay(1);
   12926 		i++;
   12927 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12929 		error = 0;
   12930 
   12931 	return error;
   12932 }
   12933 
   12934 /******************************************************************************
   12935  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12936  *
   12937  * sc - The pointer to the hw structure
   12938  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12940  * data - Pointer to the word to store the value read.
   12941  *****************************************************************************/
   12942 static int32_t
   12943 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12944     uint32_t size, uint32_t *data)
   12945 {
   12946 	uint16_t hsfsts;
   12947 	uint16_t hsflctl;
   12948 	uint32_t flash_linear_address;
   12949 	uint32_t flash_data = 0;
   12950 	int32_t error = 1;
   12951 	int32_t count = 0;
   12952 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12955 		return error;
   12956 
   12957 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12958 	    sc->sc_ich8_flash_base;
   12959 
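	/*
	 * Retry loop: (re)initialize the flash cycle, program the byte
	 * count and a READ cycle type, set the flash linear address,
	 * kick off the cycle and, on success, pull the result out of
	 * Flash Data0.
	 */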
   12960 	do {
   12961 		delay(1);
   12962 		/* Steps */
   12963 		error = wm_ich8_cycle_init(sc);
   12964 		if (error)
   12965 			break;
   12966 
   12967 		if (sc->sc_type >= WM_T_PCH_SPT)
   12968 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12969 			    >> 16;
   12970 		else
   12971 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   12973 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12974 		    & HSFCTL_BCOUNT_MASK;
   12975 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12976 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12977 			/*
   12978 			 * In SPT, This register is in Lan memory space, not
   12979 			 * flash. Therefore, only 32 bit access is supported.
   12980 			 */
   12981 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12982 			    (uint32_t)hsflctl << 16);
   12983 		} else
   12984 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12985 
   12986 		/*
   12987 		 * Write the last 24 bits of index into Flash Linear address
   12988 		 * field in Flash Address
   12989 		 */
   12990 		/* TODO: TBD maybe check the index against the size of flash */
   12991 
   12992 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12993 
   12994 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12995 
   12996 		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read in (shift in) Flash Data0,
		 * least significant byte first.
   13001 		 */
   13002 		if (error == 0) {
   13003 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13004 			if (size == 1)
   13005 				*data = (uint8_t)(flash_data & 0x000000FF);
   13006 			else if (size == 2)
   13007 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13008 			else if (size == 4)
   13009 				*data = (uint32_t)flash_data;
   13010 			break;
   13011 		} else {
   13012 			/*
   13013 			 * If we've gotten here, then things are probably
   13014 			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try, up
			 * to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13017 			 */
   13018 			if (sc->sc_type >= WM_T_PCH_SPT)
   13019 				hsfsts = ICH8_FLASH_READ32(sc,
   13020 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13021 			else
   13022 				hsfsts = ICH8_FLASH_READ16(sc,
   13023 				    ICH_FLASH_HSFSTS);
   13024 
   13025 			if (hsfsts & HSFSTS_ERR) {
   13026 				/* Repeat for some time before giving up. */
   13027 				continue;
   13028 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13029 				break;
   13030 		}
   13031 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13032 
   13033 	return error;
   13034 }
   13035 
   13036 /******************************************************************************
   13037  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13038  *
   13039  * sc - pointer to wm_hw structure
   13040  * index - The index of the byte to read.
   13041  * data - Pointer to a byte to store the value read.
   13042  *****************************************************************************/
   13043 static int32_t
   13044 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13045 {
   13046 	int32_t status;
   13047 	uint32_t word = 0;
   13048 
   13049 	status = wm_read_ich8_data(sc, index, 1, &word);
   13050 	if (status == 0)
   13051 		*data = (uint8_t)word;
   13052 	else
   13053 		*data = 0;
   13054 
   13055 	return status;
   13056 }
   13057 
   13058 /******************************************************************************
   13059  * Reads a word from the NVM using the ICH8 flash access registers.
   13060  *
   13061  * sc - pointer to wm_hw structure
   13062  * index - The starting byte index of the word to read.
   13063  * data - Pointer to a word to store the value read.
   13064  *****************************************************************************/
   13065 static int32_t
   13066 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13067 {
   13068 	int32_t status;
   13069 	uint32_t word = 0;
   13070 
   13071 	status = wm_read_ich8_data(sc, index, 2, &word);
   13072 	if (status == 0)
   13073 		*data = (uint16_t)word;
   13074 	else
   13075 		*data = 0;
   13076 
   13077 	return status;
   13078 }
   13079 
   13080 /******************************************************************************
   13081  * Reads a dword from the NVM using the ICH8 flash access registers.
   13082  *
   13083  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13086  *****************************************************************************/
   13087 static int32_t
   13088 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13089 {
   13090 	int32_t status;
   13091 
   13092 	status = wm_read_ich8_data(sc, index, 4, data);
   13093 	return status;
   13094 }
   13095 
   13096 /******************************************************************************
   13097  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13098  * register.
   13099  *
   13100  * sc - Struct containing variables accessed by shared code
   13101  * offset - offset of word in the EEPROM to read
   13102  * data - word read from the EEPROM
   13103  * words - number of words to read
   13104  *****************************************************************************/
   13105 static int
   13106 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13107 {
   13108 	int32_t	 rv = 0;
   13109 	uint32_t flash_bank = 0;
   13110 	uint32_t act_offset = 0;
   13111 	uint32_t bank_offset = 0;
   13112 	uint16_t word = 0;
   13113 	uint16_t i = 0;
   13114 
   13115 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13116 		device_xname(sc->sc_dev), __func__));
   13117 
   13118 	if (sc->nvm.acquire(sc) != 0)
   13119 		return -1;
   13120 
   13121 	/*
   13122 	 * We need to know which is the valid flash bank.  In the event
   13123 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13124 	 * managing flash_bank. So it cannot be trusted and needs
   13125 	 * to be updated with each read.
   13126 	 */
   13127 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13128 	if (rv) {
   13129 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13130 			device_xname(sc->sc_dev)));
   13131 		flash_bank = 0;
   13132 	}
   13133 
   13134 	/*
	 * Adjust the offset if we're on bank 1: the bank size is kept in
	 * words, so double it to get a byte offset.
   13137 	 */
   13138 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13139 
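	/*
	 * Worked example (hypothetical size): with a 2048-word flash
	 * bank, a read from bank 1 starts at byte offset
	 * 2048 * 2 = 0x1000.
	 */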
   13140 	for (i = 0; i < words; i++) {
   13141 		/* The NVM part needs a byte offset, hence * 2 */
   13142 		act_offset = bank_offset + ((offset + i) * 2);
   13143 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13144 		if (rv) {
   13145 			aprint_error_dev(sc->sc_dev,
   13146 			    "%s: failed to read NVM\n", __func__);
   13147 			break;
   13148 		}
   13149 		data[i] = word;
   13150 	}
   13151 
   13152 	sc->nvm.release(sc);
   13153 	return rv;
   13154 }
   13155 
   13156 /******************************************************************************
   13157  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13158  * register.
   13159  *
   13160  * sc - Struct containing variables accessed by shared code
   13161  * offset - offset of word in the EEPROM to read
   13162  * data - word read from the EEPROM
   13163  * words - number of words to read
   13164  *****************************************************************************/
   13165 static int
   13166 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13167 {
   13168 	int32_t	 rv = 0;
   13169 	uint32_t flash_bank = 0;
   13170 	uint32_t act_offset = 0;
   13171 	uint32_t bank_offset = 0;
   13172 	uint32_t dword = 0;
   13173 	uint16_t i = 0;
   13174 
   13175 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13176 		device_xname(sc->sc_dev), __func__));
   13177 
   13178 	if (sc->nvm.acquire(sc) != 0)
   13179 		return -1;
   13180 
   13181 	/*
   13182 	 * We need to know which is the valid flash bank.  In the event
   13183 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13184 	 * managing flash_bank. So it cannot be trusted and needs
   13185 	 * to be updated with each read.
   13186 	 */
   13187 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13188 	if (rv) {
   13189 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13190 			device_xname(sc->sc_dev)));
   13191 		flash_bank = 0;
   13192 	}
   13193 
   13194 	/*
	 * Adjust the offset if we're on bank 1: the bank size is kept in
	 * words, so double it to get a byte offset.
   13197 	 */
   13198 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13199 
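	/*
	 * Example: act_offset 0x0106 is read as the dword at 0x0104;
	 * bit 1 of the offset is set, so the requested word is the high
	 * half (bits 31:16) of that dword.
	 */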
   13200 	for (i = 0; i < words; i++) {
   13201 		/* The NVM part needs a byte offset, hence * 2 */
   13202 		act_offset = bank_offset + ((offset + i) * 2);
   13203 		/* but we must read dword aligned, so mask ... */
   13204 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13205 		if (rv) {
   13206 			aprint_error_dev(sc->sc_dev,
   13207 			    "%s: failed to read NVM\n", __func__);
   13208 			break;
   13209 		}
   13210 		/* ... and pick out low or high word */
   13211 		if ((act_offset & 0x2) == 0)
   13212 			data[i] = (uint16_t)(dword & 0xFFFF);
   13213 		else
   13214 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13215 	}
   13216 
   13217 	sc->nvm.release(sc);
   13218 	return rv;
   13219 }
   13220 
   13221 /* iNVM */
   13222 
   13223 static int
   13224 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13225 {
	int32_t	 rv = -1;	/* Assume "not found" until a match is seen */
   13227 	uint32_t invm_dword;
   13228 	uint16_t i;
   13229 	uint8_t record_type, word_address;
   13230 
   13231 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13232 		device_xname(sc->sc_dev), __func__));
   13233 
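	/*
	 * Walk the iNVM dword array one record at a time: skip over the
	 * payload of CSR-autoload and RSA-key records, stop at the first
	 * uninitialized record, and match word-autoload records against
	 * the requested word address.
	 */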
   13234 	for (i = 0; i < INVM_SIZE; i++) {
   13235 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13236 		/* Get record type */
   13237 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13238 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13239 			break;
   13240 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13241 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13242 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13243 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13244 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13245 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13246 			if (word_address == address) {
   13247 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13248 				rv = 0;
   13249 				break;
   13250 			}
   13251 		}
   13252 	}
   13253 
   13254 	return rv;
   13255 }
   13256 
   13257 static int
   13258 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13259 {
   13260 	int rv = 0;
   13261 	int i;
   13262 
   13263 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13264 		device_xname(sc->sc_dev), __func__));
   13265 
   13266 	if (sc->nvm.acquire(sc) != 0)
   13267 		return -1;
   13268 
   13269 	for (i = 0; i < words; i++) {
   13270 		switch (offset + i) {
   13271 		case NVM_OFF_MACADDR:
   13272 		case NVM_OFF_MACADDR1:
   13273 		case NVM_OFF_MACADDR2:
   13274 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13275 			if (rv != 0) {
   13276 				data[i] = 0xffff;
   13277 				rv = -1;
   13278 			}
   13279 			break;
   13280 		case NVM_OFF_CFG2:
   13281 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13282 			if (rv != 0) {
   13283 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13284 				rv = 0;
   13285 			}
   13286 			break;
   13287 		case NVM_OFF_CFG4:
   13288 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13289 			if (rv != 0) {
   13290 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13291 				rv = 0;
   13292 			}
   13293 			break;
   13294 		case NVM_OFF_LED_1_CFG:
   13295 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13296 			if (rv != 0) {
   13297 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13298 				rv = 0;
   13299 			}
   13300 			break;
   13301 		case NVM_OFF_LED_0_2_CFG:
   13302 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13303 			if (rv != 0) {
   13304 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13305 				rv = 0;
   13306 			}
   13307 			break;
   13308 		case NVM_OFF_ID_LED_SETTINGS:
   13309 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13310 			if (rv != 0) {
   13311 				*data = ID_LED_RESERVED_FFFF;
   13312 				rv = 0;
   13313 			}
   13314 			break;
   13315 		default:
   13316 			DPRINTF(WM_DEBUG_NVM,
   13317 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13318 			*data = NVM_RESERVED_WORD;
   13319 			break;
   13320 		}
   13321 	}
   13322 
   13323 	sc->nvm.release(sc);
   13324 	return rv;
   13325 }
   13326 
   13327 /* Lock, detecting NVM type, validate checksum, version and read */
   13328 
   13329 static int
   13330 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13331 {
   13332 	uint32_t eecd = 0;
   13333 
   13334 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13335 	    || sc->sc_type == WM_T_82583) {
   13336 		eecd = CSR_READ(sc, WMREG_EECD);
   13337 
   13338 		/* Isolate bits 15 & 16 */
   13339 		eecd = ((eecd >> 15) & 0x03);
   13340 
   13341 		/* If both bits are set, device is Flash type */
   13342 		if (eecd == 0x03)
   13343 			return 0;
   13344 	}
   13345 	return 1;
   13346 }
   13347 
   13348 static int
   13349 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13350 {
   13351 	uint32_t eec;
   13352 
   13353 	eec = CSR_READ(sc, WMREG_EEC);
   13354 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13355 		return 1;
   13356 
   13357 	return 0;
   13358 }
   13359 
   13360 /*
   13361  * wm_nvm_validate_checksum
   13362  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal NVM_CHECKSUM.
   13364  */
   13365 static int
   13366 wm_nvm_validate_checksum(struct wm_softc *sc)
   13367 {
   13368 	uint16_t checksum;
   13369 	uint16_t eeprom_data;
   13370 #ifdef WM_DEBUG
   13371 	uint16_t csum_wordaddr, valid_checksum;
   13372 #endif
   13373 	int i;
   13374 
   13375 	checksum = 0;
   13376 
   13377 	/* Don't check for I211 */
   13378 	if (sc->sc_type == WM_T_I211)
   13379 		return 0;
   13380 
   13381 #ifdef WM_DEBUG
   13382 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13383 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13384 		csum_wordaddr = NVM_OFF_COMPAT;
   13385 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13386 	} else {
   13387 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13388 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13389 	}
   13390 
   13391 	/* Dump EEPROM image for debug */
   13392 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13393 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13394 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13395 		/* XXX PCH_SPT? */
   13396 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13397 		if ((eeprom_data & valid_checksum) == 0)
   13398 			DPRINTF(WM_DEBUG_NVM,
   13399 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13400 				device_xname(sc->sc_dev), eeprom_data,
   13401 				    valid_checksum));
   13402 	}
   13403 
   13404 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13405 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13406 		for (i = 0; i < NVM_SIZE; i++) {
   13407 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13408 				printf("XXXX ");
   13409 			else
   13410 				printf("%04hx ", eeprom_data);
   13411 			if (i % 8 == 7)
   13412 				printf("\n");
   13413 		}
   13414 	}
   13415 
   13416 #endif /* WM_DEBUG */
   13417 
   13418 	for (i = 0; i < NVM_SIZE; i++) {
   13419 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13420 			return 1;
   13421 		checksum += eeprom_data;
   13422 	}
   13423 
   13424 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13425 #ifdef WM_DEBUG
   13426 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13427 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13428 #endif
   13429 	}
   13430 
   13431 	return 0;
   13432 }
   13433 
   13434 static void
   13435 wm_nvm_version_invm(struct wm_softc *sc)
   13436 {
   13437 	uint32_t dword;
   13438 
   13439 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   13443 	 *
   13444 	 * Example:
   13445 	 *
   13446 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13447 	 */
   13448 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13449 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13450 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13451 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13452 }
   13453 
   13454 static void
   13455 wm_nvm_version(struct wm_softc *sc)
   13456 {
   13457 	uint16_t major, minor, build, patch;
   13458 	uint16_t uid0, uid1;
   13459 	uint16_t nvm_data;
   13460 	uint16_t off;
   13461 	bool check_version = false;
   13462 	bool check_optionrom = false;
   13463 	bool have_build = false;
   13464 	bool have_uid = true;
   13465 
   13466 	/*
   13467 	 * Version format:
   13468 	 *
   13469 	 * XYYZ
   13470 	 * X0YZ
   13471 	 * X0YY
   13472 	 *
   13473 	 * Example:
   13474 	 *
   13475 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13476 	 *	82571	0x50a6	5.10.6?
   13477 	 *	82572	0x506a	5.6.10?
   13478 	 *	82572EI	0x5069	5.6.9?
   13479 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13480 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13482 	 */
   13483 
   13484 	/*
   13485 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13488 	 */
   13489 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13490 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13491 		have_uid = false;
   13492 
   13493 	switch (sc->sc_type) {
   13494 	case WM_T_82571:
   13495 	case WM_T_82572:
   13496 	case WM_T_82574:
   13497 	case WM_T_82583:
   13498 		check_version = true;
   13499 		check_optionrom = true;
   13500 		have_build = true;
   13501 		break;
   13502 	case WM_T_82575:
   13503 	case WM_T_82576:
   13504 	case WM_T_82580:
   13505 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13506 			check_version = true;
   13507 		break;
   13508 	case WM_T_I211:
   13509 		wm_nvm_version_invm(sc);
   13510 		have_uid = false;
   13511 		goto printver;
   13512 	case WM_T_I210:
   13513 		if (!wm_nvm_flash_presence_i210(sc)) {
   13514 			wm_nvm_version_invm(sc);
   13515 			have_uid = false;
   13516 			goto printver;
   13517 		}
   13518 		/* FALLTHROUGH */
   13519 	case WM_T_I350:
   13520 	case WM_T_I354:
   13521 		check_version = true;
   13522 		check_optionrom = true;
   13523 		break;
   13524 	default:
   13525 		return;
   13526 	}
   13527 	if (check_version
   13528 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13529 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13530 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13531 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13532 			build = nvm_data & NVM_BUILD_MASK;
   13533 			have_build = true;
   13534 		} else
   13535 			minor = nvm_data & 0x00ff;
   13536 
		/* Convert minor from BCD to decimal (e.g. 0x10 -> 10) */
   13538 		minor = (minor / 16) * 10 + (minor % 16);
   13539 		sc->sc_nvm_ver_major = major;
   13540 		sc->sc_nvm_ver_minor = minor;
   13541 
   13542 printver:
   13543 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13544 		    sc->sc_nvm_ver_minor);
   13545 		if (have_build) {
   13546 			sc->sc_nvm_ver_build = build;
   13547 			aprint_verbose(".%d", build);
   13548 		}
   13549 	}
   13550 
	/* Assume the Option ROM area is above NVM_SIZE */
   13552 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13553 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13554 		/* Option ROM Version */
   13555 		if ((off != 0x0000) && (off != 0xffff)) {
   13556 			int rv;
   13557 
   13558 			off += NVM_COMBO_VER_OFF;
   13559 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13560 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13561 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13562 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13563 				/* 16bits */
   13564 				major = uid0 >> 8;
   13565 				build = (uid0 << 8) | (uid1 >> 8);
   13566 				patch = uid1 & 0x00ff;
   13567 				aprint_verbose(", option ROM Version %d.%d.%d",
   13568 				    major, build, patch);
   13569 			}
   13570 		}
   13571 	}
   13572 
   13573 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13574 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13575 }
   13576 
   13577 /*
   13578  * wm_nvm_read:
   13579  *
   13580  *	Read data from the serial EEPROM.
   13581  */
   13582 static int
   13583 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13584 {
   13585 	int rv;
   13586 
   13587 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13588 		device_xname(sc->sc_dev), __func__));
   13589 
   13590 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13591 		return -1;
   13592 
   13593 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13594 
   13595 	return rv;
   13596 }
   13597 
   13598 /*
   13599  * Hardware semaphores.
 * Very complex...
   13601  */
   13602 
   13603 static int
   13604 wm_get_null(struct wm_softc *sc)
   13605 {
   13606 
   13607 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13608 		device_xname(sc->sc_dev), __func__));
   13609 	return 0;
   13610 }
   13611 
   13612 static void
   13613 wm_put_null(struct wm_softc *sc)
   13614 {
   13615 
   13616 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13617 		device_xname(sc->sc_dev), __func__));
   13618 	return;
   13619 }
   13620 
   13621 static int
   13622 wm_get_eecd(struct wm_softc *sc)
   13623 {
   13624 	uint32_t reg;
   13625 	int x;
   13626 
   13627 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13628 		device_xname(sc->sc_dev), __func__));
   13629 
   13630 	reg = CSR_READ(sc, WMREG_EECD);
   13631 
   13632 	/* Request EEPROM access. */
   13633 	reg |= EECD_EE_REQ;
   13634 	CSR_WRITE(sc, WMREG_EECD, reg);
   13635 
	/* ...and wait for it to be granted. */
   13637 	for (x = 0; x < 1000; x++) {
   13638 		reg = CSR_READ(sc, WMREG_EECD);
   13639 		if (reg & EECD_EE_GNT)
   13640 			break;
   13641 		delay(5);
   13642 	}
   13643 	if ((reg & EECD_EE_GNT) == 0) {
   13644 		aprint_error_dev(sc->sc_dev,
   13645 		    "could not acquire EEPROM GNT\n");
   13646 		reg &= ~EECD_EE_REQ;
   13647 		CSR_WRITE(sc, WMREG_EECD, reg);
   13648 		return -1;
   13649 	}
   13650 
   13651 	return 0;
   13652 }
   13653 
   13654 static void
   13655 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13656 {
   13657 
   13658 	*eecd |= EECD_SK;
   13659 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13660 	CSR_WRITE_FLUSH(sc);
   13661 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13662 		delay(1);
   13663 	else
   13664 		delay(50);
   13665 }
   13666 
   13667 static void
   13668 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13669 {
   13670 
   13671 	*eecd &= ~EECD_SK;
   13672 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13673 	CSR_WRITE_FLUSH(sc);
   13674 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13675 		delay(1);
   13676 	else
   13677 		delay(50);
   13678 }
   13679 
   13680 static void
   13681 wm_put_eecd(struct wm_softc *sc)
   13682 {
   13683 	uint32_t reg;
   13684 
   13685 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13686 		device_xname(sc->sc_dev), __func__));
   13687 
   13688 	/* Stop nvm */
   13689 	reg = CSR_READ(sc, WMREG_EECD);
   13690 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13691 		/* Pull CS high */
   13692 		reg |= EECD_CS;
   13693 		wm_nvm_eec_clock_lower(sc, &reg);
   13694 	} else {
   13695 		/* CS on Microwire is active-high */
   13696 		reg &= ~(EECD_CS | EECD_DI);
   13697 		CSR_WRITE(sc, WMREG_EECD, reg);
   13698 		wm_nvm_eec_clock_raise(sc, &reg);
   13699 		wm_nvm_eec_clock_lower(sc, &reg);
   13700 	}
   13701 
   13702 	reg = CSR_READ(sc, WMREG_EECD);
   13703 	reg &= ~EECD_EE_REQ;
   13704 	CSR_WRITE(sc, WMREG_EECD, reg);
   13705 
   13706 	return;
   13707 }
   13708 
   13709 /*
   13710  * Get hardware semaphore.
   13711  * Same as e1000_get_hw_semaphore_generic()
   13712  */
   13713 static int
   13714 wm_get_swsm_semaphore(struct wm_softc *sc)
   13715 {
   13716 	int32_t timeout;
   13717 	uint32_t swsm;
   13718 
   13719 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13720 		device_xname(sc->sc_dev), __func__));
   13721 	KASSERT(sc->sc_nvm_wordsize > 0);
   13722 
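	/*
	 * Two-stage handshake: first wait for the hardware to show SMBI
	 * clear (the SW semaphore), then claim the SW/FW semaphore by
	 * setting SWESMBI and reading it back; if the bit sticks, the
	 * firmware isn't holding it and the semaphore is ours.
	 */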
   13723 retry:
   13724 	/* Get the SW semaphore. */
   13725 	timeout = sc->sc_nvm_wordsize + 1;
   13726 	while (timeout) {
   13727 		swsm = CSR_READ(sc, WMREG_SWSM);
   13728 
   13729 		if ((swsm & SWSM_SMBI) == 0)
   13730 			break;
   13731 
   13732 		delay(50);
   13733 		timeout--;
   13734 	}
   13735 
   13736 	if (timeout == 0) {
   13737 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13738 			/*
   13739 			 * In rare circumstances, the SW semaphore may already
   13740 			 * be held unintentionally. Clear the semaphore once
   13741 			 * before giving up.
   13742 			 */
   13743 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13744 			wm_put_swsm_semaphore(sc);
   13745 			goto retry;
   13746 		}
   13747 		aprint_error_dev(sc->sc_dev,
   13748 		    "could not acquire SWSM SMBI\n");
   13749 		return 1;
   13750 	}
   13751 
   13752 	/* Get the FW semaphore. */
   13753 	timeout = sc->sc_nvm_wordsize + 1;
   13754 	while (timeout) {
   13755 		swsm = CSR_READ(sc, WMREG_SWSM);
   13756 		swsm |= SWSM_SWESMBI;
   13757 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13758 		/* If we managed to set the bit we got the semaphore. */
   13759 		swsm = CSR_READ(sc, WMREG_SWSM);
   13760 		if (swsm & SWSM_SWESMBI)
   13761 			break;
   13762 
   13763 		delay(50);
   13764 		timeout--;
   13765 	}
   13766 
   13767 	if (timeout == 0) {
   13768 		aprint_error_dev(sc->sc_dev,
   13769 		    "could not acquire SWSM SWESMBI\n");
   13770 		/* Release semaphores */
   13771 		wm_put_swsm_semaphore(sc);
   13772 		return 1;
   13773 	}
   13774 	return 0;
   13775 }
   13776 
   13777 /*
   13778  * Put hardware semaphore.
   13779  * Same as e1000_put_hw_semaphore_generic()
   13780  */
   13781 static void
   13782 wm_put_swsm_semaphore(struct wm_softc *sc)
   13783 {
   13784 	uint32_t swsm;
   13785 
   13786 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13787 		device_xname(sc->sc_dev), __func__));
   13788 
   13789 	swsm = CSR_READ(sc, WMREG_SWSM);
   13790 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13791 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13792 }
   13793 
   13794 /*
   13795  * Get SW/FW semaphore.
   13796  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13797  */
   13798 static int
   13799 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13800 {
   13801 	uint32_t swfw_sync;
   13802 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13803 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13804 	int timeout;
   13805 
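	/*
	 * SW_FW_SYNC keeps software claim bits in its low half and the
	 * matching firmware claim bits in its high half; a resource may
	 * be taken only while neither side owns it, and the register
	 * itself is accessed under the SWSM semaphore.
	 */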
   13806 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13807 		device_xname(sc->sc_dev), __func__));
   13808 
   13809 	if (sc->sc_type == WM_T_80003)
   13810 		timeout = 50;
   13811 	else
   13812 		timeout = 200;
   13813 
   13814 	while (timeout) {
   13815 		if (wm_get_swsm_semaphore(sc)) {
   13816 			aprint_error_dev(sc->sc_dev,
   13817 			    "%s: failed to get semaphore\n",
   13818 			    __func__);
   13819 			return 1;
   13820 		}
   13821 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13822 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13823 			swfw_sync |= swmask;
   13824 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13825 			wm_put_swsm_semaphore(sc);
   13826 			return 0;
   13827 		}
   13828 		wm_put_swsm_semaphore(sc);
   13829 		delay(5000);
   13830 		timeout--;
   13831 	}
   13832 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13833 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13834 	return 1;
   13835 }
   13836 
   13837 static void
   13838 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13839 {
   13840 	uint32_t swfw_sync;
   13841 
   13842 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13843 		device_xname(sc->sc_dev), __func__));
   13844 
   13845 	while (wm_get_swsm_semaphore(sc) != 0)
   13846 		continue;
   13847 
   13848 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13849 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13850 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13851 
   13852 	wm_put_swsm_semaphore(sc);
   13853 }
   13854 
   13855 static int
   13856 wm_get_nvm_80003(struct wm_softc *sc)
   13857 {
   13858 	int rv;
   13859 
   13860 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13861 		device_xname(sc->sc_dev), __func__));
   13862 
   13863 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13864 		aprint_error_dev(sc->sc_dev,
   13865 		    "%s: failed to get semaphore(SWFW)\n",
   13866 		    __func__);
   13867 		return rv;
   13868 	}
   13869 
   13870 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13871 	    && (rv = wm_get_eecd(sc)) != 0) {
   13872 		aprint_error_dev(sc->sc_dev,
   13873 		    "%s: failed to get semaphore(EECD)\n",
   13874 		    __func__);
   13875 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13876 		return rv;
   13877 	}
   13878 
   13879 	return 0;
   13880 }
   13881 
   13882 static void
   13883 wm_put_nvm_80003(struct wm_softc *sc)
   13884 {
   13885 
   13886 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13887 		device_xname(sc->sc_dev), __func__));
   13888 
   13889 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13890 		wm_put_eecd(sc);
   13891 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13892 }
   13893 
   13894 static int
   13895 wm_get_nvm_82571(struct wm_softc *sc)
   13896 {
   13897 	int rv;
   13898 
   13899 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13900 		device_xname(sc->sc_dev), __func__));
   13901 
   13902 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13903 		return rv;
   13904 
   13905 	switch (sc->sc_type) {
   13906 	case WM_T_82573:
   13907 		break;
   13908 	default:
   13909 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13910 			rv = wm_get_eecd(sc);
   13911 		break;
   13912 	}
   13913 
   13914 	if (rv != 0) {
   13915 		aprint_error_dev(sc->sc_dev,
   13916 		    "%s: failed to get semaphore\n",
   13917 		    __func__);
   13918 		wm_put_swsm_semaphore(sc);
   13919 	}
   13920 
   13921 	return rv;
   13922 }
   13923 
   13924 static void
   13925 wm_put_nvm_82571(struct wm_softc *sc)
   13926 {
   13927 
   13928 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13929 		device_xname(sc->sc_dev), __func__));
   13930 
   13931 	switch (sc->sc_type) {
   13932 	case WM_T_82573:
   13933 		break;
   13934 	default:
   13935 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13936 			wm_put_eecd(sc);
   13937 		break;
   13938 	}
   13939 
   13940 	wm_put_swsm_semaphore(sc);
   13941 }
   13942 
   13943 static int
   13944 wm_get_phy_82575(struct wm_softc *sc)
   13945 {
   13946 
   13947 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13948 		device_xname(sc->sc_dev), __func__));
   13949 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13950 }
   13951 
   13952 static void
   13953 wm_put_phy_82575(struct wm_softc *sc)
   13954 {
   13955 
   13956 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13957 		device_xname(sc->sc_dev), __func__));
   13958 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13959 }
   13960 
   13961 static int
   13962 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13963 {
   13964 	uint32_t ext_ctrl;
	int timeout;
   13966 
   13967 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 
   13970 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13971 	for (timeout = 0; timeout < 200; timeout++) {
   13972 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13973 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13974 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13975 
   13976 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13977 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13978 			return 0;
   13979 		delay(5000);
   13980 	}
   13981 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13982 	    device_xname(sc->sc_dev), ext_ctrl);
   13983 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13984 	return 1;
   13985 }
   13986 
   13987 static void
   13988 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13989 {
   13990 	uint32_t ext_ctrl;
   13991 
   13992 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13993 		device_xname(sc->sc_dev), __func__));
   13994 
   13995 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13996 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13997 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13998 
   13999 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14000 }
   14001 
   14002 static int
   14003 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14004 {
   14005 	uint32_t ext_ctrl;
   14006 	int timeout;
   14007 
   14008 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14009 		device_xname(sc->sc_dev), __func__));
   14010 	mutex_enter(sc->sc_ich_phymtx);
   14011 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14012 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14013 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14014 			break;
   14015 		delay(1000);
   14016 	}
   14017 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14018 		printf("%s: SW has already locked the resource\n",
   14019 		    device_xname(sc->sc_dev));
   14020 		goto out;
   14021 	}
   14022 
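	/*
	 * Claim ownership: set the SW ownership bit and read it back.
	 * The write doesn't stick while the firmware still owns the
	 * resource, so a read-back with the bit set means we got it.
	 */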
   14023 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14024 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14025 	for (timeout = 0; timeout < 1000; timeout++) {
   14026 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14027 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14028 			break;
   14029 		delay(1000);
   14030 	}
   14031 	if (timeout >= 1000) {
   14032 		printf("%s: failed to acquire semaphore\n",
   14033 		    device_xname(sc->sc_dev));
   14034 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14035 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14036 		goto out;
   14037 	}
   14038 	return 0;
   14039 
   14040 out:
   14041 	mutex_exit(sc->sc_ich_phymtx);
   14042 	return 1;
   14043 }
   14044 
   14045 static void
   14046 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14047 {
   14048 	uint32_t ext_ctrl;
   14049 
   14050 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14051 		device_xname(sc->sc_dev), __func__));
   14052 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14053 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14054 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14055 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14056 	} else {
   14057 		printf("%s: Semaphore unexpectedly released\n",
   14058 		    device_xname(sc->sc_dev));
   14059 	}
   14060 
   14061 	mutex_exit(sc->sc_ich_phymtx);
   14062 }
   14063 
   14064 static int
   14065 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14066 {
   14067 
   14068 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14069 		device_xname(sc->sc_dev), __func__));
   14070 	mutex_enter(sc->sc_ich_nvmmtx);
   14071 
   14072 	return 0;
   14073 }
   14074 
   14075 static void
   14076 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14077 {
   14078 
   14079 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14080 		device_xname(sc->sc_dev), __func__));
   14081 	mutex_exit(sc->sc_ich_nvmmtx);
   14082 }
   14083 
   14084 static int
   14085 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14086 {
   14087 	int i = 0;
   14088 	uint32_t reg;
   14089 
   14090 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14091 		device_xname(sc->sc_dev), __func__));
   14092 
   14093 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14094 	do {
   14095 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14096 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14097 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14098 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14099 			break;
   14100 		delay(2*1000);
   14101 		i++;
   14102 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14103 
   14104 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14105 		wm_put_hw_semaphore_82573(sc);
   14106 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14107 		    device_xname(sc->sc_dev));
   14108 		return -1;
   14109 	}
   14110 
   14111 	return 0;
   14112 }
   14113 
   14114 static void
   14115 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14116 {
   14117 	uint32_t reg;
   14118 
   14119 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14120 		device_xname(sc->sc_dev), __func__));
   14121 
   14122 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14123 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14124 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14125 }
   14126 
   14127 /*
   14128  * Management mode and power management related subroutines.
   14129  * BMC, AMT, suspend/resume and EEE.
   14130  */
   14131 
   14132 #ifdef WM_WOL
   14133 static int
   14134 wm_check_mng_mode(struct wm_softc *sc)
   14135 {
   14136 	int rv;
   14137 
   14138 	switch (sc->sc_type) {
   14139 	case WM_T_ICH8:
   14140 	case WM_T_ICH9:
   14141 	case WM_T_ICH10:
   14142 	case WM_T_PCH:
   14143 	case WM_T_PCH2:
   14144 	case WM_T_PCH_LPT:
   14145 	case WM_T_PCH_SPT:
   14146 	case WM_T_PCH_CNP:
   14147 		rv = wm_check_mng_mode_ich8lan(sc);
   14148 		break;
   14149 	case WM_T_82574:
   14150 	case WM_T_82583:
   14151 		rv = wm_check_mng_mode_82574(sc);
   14152 		break;
   14153 	case WM_T_82571:
   14154 	case WM_T_82572:
   14155 	case WM_T_82573:
   14156 	case WM_T_80003:
   14157 		rv = wm_check_mng_mode_generic(sc);
   14158 		break;
   14159 	default:
		/* Nothing to do */
   14161 		rv = 0;
   14162 		break;
   14163 	}
   14164 
   14165 	return rv;
   14166 }
   14167 
   14168 static int
   14169 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14170 {
   14171 	uint32_t fwsm;
   14172 
   14173 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14174 
   14175 	if (((fwsm & FWSM_FW_VALID) != 0)
   14176 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14177 		return 1;
   14178 
   14179 	return 0;
   14180 }
   14181 
   14182 static int
   14183 wm_check_mng_mode_82574(struct wm_softc *sc)
   14184 {
   14185 	uint16_t data;
   14186 
   14187 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14188 
   14189 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14190 		return 1;
   14191 
   14192 	return 0;
   14193 }
   14194 
   14195 static int
   14196 wm_check_mng_mode_generic(struct wm_softc *sc)
   14197 {
   14198 	uint32_t fwsm;
   14199 
   14200 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14201 
   14202 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14203 		return 1;
   14204 
   14205 	return 0;
   14206 }
   14207 #endif /* WM_WOL */
   14208 
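/*
 * Decide whether management packets can pass through to the host:
 * this requires ASF firmware to be present and MANC_RECV_TCO_EN set,
 * plus either iAMT mode (when the ARC subsystem is valid), the
 * 82574/82583 NVM pass-through setting, or plain SMBus-without-ASF
 * manageability.
 */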
   14209 static int
   14210 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14211 {
   14212 	uint32_t manc, fwsm, factps;
   14213 
   14214 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14215 		return 0;
   14216 
   14217 	manc = CSR_READ(sc, WMREG_MANC);
   14218 
   14219 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14220 		device_xname(sc->sc_dev), manc));
   14221 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14222 		return 0;
   14223 
   14224 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14225 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14226 		factps = CSR_READ(sc, WMREG_FACTPS);
   14227 		if (((factps & FACTPS_MNGCG) == 0)
   14228 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14229 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14231 		uint16_t data;
   14232 
   14233 		factps = CSR_READ(sc, WMREG_FACTPS);
   14234 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14235 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14236 			device_xname(sc->sc_dev), factps, data));
   14237 		if (((factps & FACTPS_MNGCG) == 0)
   14238 		    && ((data & NVM_CFG2_MNGM_MASK)
   14239 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14240 			return 1;
   14241 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14242 	    && ((manc & MANC_ASF_EN) == 0))
   14243 		return 1;
   14244 
   14245 	return 0;
   14246 }
   14247 
   14248 static bool
   14249 wm_phy_resetisblocked(struct wm_softc *sc)
   14250 {
   14251 	bool blocked = false;
   14252 	uint32_t reg;
   14253 	int i = 0;
   14254 
   14255 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14256 		device_xname(sc->sc_dev), __func__));
   14257 
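	/*
	 * On ICH/PCH parts the firmware deasserts FWSM_RSPCIPHY while a
	 * PHY reset must not be performed; poll it for up to roughly
	 * 300ms (30 x 10ms) before reporting the reset as blocked.
	 */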
   14258 	switch (sc->sc_type) {
   14259 	case WM_T_ICH8:
   14260 	case WM_T_ICH9:
   14261 	case WM_T_ICH10:
   14262 	case WM_T_PCH:
   14263 	case WM_T_PCH2:
   14264 	case WM_T_PCH_LPT:
   14265 	case WM_T_PCH_SPT:
   14266 	case WM_T_PCH_CNP:
   14267 		do {
   14268 			reg = CSR_READ(sc, WMREG_FWSM);
   14269 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14270 				blocked = true;
   14271 				delay(10*1000);
   14272 				continue;
   14273 			}
   14274 			blocked = false;
   14275 		} while (blocked && (i++ < 30));
   14276 		return blocked;
   14278 	case WM_T_82571:
   14279 	case WM_T_82572:
   14280 	case WM_T_82573:
   14281 	case WM_T_82574:
   14282 	case WM_T_82583:
   14283 	case WM_T_80003:
   14284 		reg = CSR_READ(sc, WMREG_MANC);
		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   14290 	default:
   14291 		/* no problem */
   14292 		break;
   14293 	}
   14294 
   14295 	return false;
   14296 }
   14297 
   14298 static void
   14299 wm_get_hw_control(struct wm_softc *sc)
   14300 {
   14301 	uint32_t reg;
   14302 
   14303 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14304 		device_xname(sc->sc_dev), __func__));
   14305 
   14306 	if (sc->sc_type == WM_T_82573) {
   14307 		reg = CSR_READ(sc, WMREG_SWSM);
   14308 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14309 	} else if (sc->sc_type >= WM_T_82571) {
   14310 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14311 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14312 	}
   14313 }
   14314 
   14315 static void
   14316 wm_release_hw_control(struct wm_softc *sc)
   14317 {
   14318 	uint32_t reg;
   14319 
   14320 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14321 		device_xname(sc->sc_dev), __func__));
   14322 
   14323 	if (sc->sc_type == WM_T_82573) {
   14324 		reg = CSR_READ(sc, WMREG_SWSM);
   14325 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14326 	} else if (sc->sc_type >= WM_T_82571) {
   14327 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14328 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14329 	}
   14330 }
   14331 
   14332 static void
   14333 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14334 {
   14335 	uint32_t reg;
   14336 
   14337 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14338 		device_xname(sc->sc_dev), __func__));
   14339 
   14340 	if (sc->sc_type < WM_T_PCH2)
   14341 		return;
   14342 
   14343 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14344 
   14345 	if (gate)
   14346 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14347 	else
   14348 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14349 
   14350 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14351 }
   14352 
   14353 static int
   14354 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14355 {
   14356 	uint32_t fwsm, reg;
   14357 	int rv = 0;
   14358 
   14359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14360 		device_xname(sc->sc_dev), __func__));
   14361 
   14362 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14363 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14364 
   14365 	/* Disable ULP */
   14366 	wm_ulp_disable(sc);
   14367 
   14368 	/* Acquire PHY semaphore */
   14369 	rv = sc->phy.acquire(sc);
   14370 	if (rv != 0) {
   14371 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14372 		device_xname(sc->sc_dev), __func__));
   14373 		return -1;
   14374 	}
   14375 
   14376 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14377 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14378 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14379 	 */
   14380 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14381 	switch (sc->sc_type) {
   14382 	case WM_T_PCH_LPT:
   14383 	case WM_T_PCH_SPT:
   14384 	case WM_T_PCH_CNP:
   14385 		if (wm_phy_is_accessible_pchlan(sc))
   14386 			break;
   14387 
   14388 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14389 		 * forcing MAC to SMBus mode first.
   14390 		 */
   14391 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14392 		reg |= CTRL_EXT_FORCE_SMBUS;
   14393 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14394 #if 0
   14395 		/* XXX Isn't this required??? */
   14396 		CSR_WRITE_FLUSH(sc);
   14397 #endif
   14398 		/* Wait 50 milliseconds for MAC to finish any retries
   14399 		 * that it might be trying to perform from previous
   14400 		 * attempts to acknowledge any phy read requests.
   14401 		 */
   14402 		delay(50 * 1000);
   14403 		/* FALLTHROUGH */
   14404 	case WM_T_PCH2:
   14405 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14406 			break;
   14407 		/* FALLTHROUGH */
   14408 	case WM_T_PCH:
   14409 		if (sc->sc_type == WM_T_PCH)
   14410 			if ((fwsm & FWSM_FW_VALID) != 0)
   14411 				break;
   14412 
   14413 		if (wm_phy_resetisblocked(sc) == true) {
   14414 			printf("XXX reset is blocked(3)\n");
   14415 			break;
   14416 		}
   14417 
   14418 		/* Toggle LANPHYPC Value bit */
   14419 		wm_toggle_lanphypc_pch_lpt(sc);
   14420 
   14421 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14422 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14423 				break;
   14424 
   14425 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14426 			 * so ensure that the MAC is also out of SMBus mode
   14427 			 */
   14428 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14429 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14430 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14431 
   14432 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14433 				break;
   14434 			rv = -1;
   14435 		}
   14436 		break;
   14437 	default:
   14438 		break;
   14439 	}
   14440 
   14441 	/* Release semaphore */
   14442 	sc->phy.release(sc);
   14443 
   14444 	if (rv == 0) {
   14445 		/* Check to see if able to reset PHY.  Print error if not */
   14446 		if (wm_phy_resetisblocked(sc)) {
   14447 			printf("XXX reset is blocked(4)\n");
   14448 			goto out;
   14449 		}
   14450 
   14451 		/* Reset the PHY before any access to it.  Doing so, ensures
   14452 		 * that the PHY is in a known good state before we read/write
   14453 		 * PHY registers.  The generic reset is sufficient here,
   14454 		 * because we haven't determined the PHY type yet.
   14455 		 */
   14456 		if (wm_reset_phy(sc) != 0)
   14457 			goto out;
   14458 
   14459 		/* On a successful reset, possibly need to wait for the PHY
   14460 		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, the
		 * reset is blocked; just report that condition here.
   14464 		 */
   14465 		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
   14467 	}
   14468 
   14469 out:
   14470 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14471 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14472 		delay(10*1000);
   14473 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14474 	}
   14475 
	return rv;
   14477 }
   14478 
   14479 static void
   14480 wm_init_manageability(struct wm_softc *sc)
   14481 {
   14482 
   14483 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14484 		device_xname(sc->sc_dev), __func__));
   14485 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14486 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14487 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14488 
   14489 		/* Disable hardware interception of ARP */
   14490 		manc &= ~MANC_ARP_EN;
   14491 
   14492 		/* Enable receiving management packets to the host */
   14493 		if (sc->sc_type >= WM_T_82571) {
   14494 			manc |= MANC_EN_MNG2HOST;
   14495 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14496 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14497 		}
   14498 
   14499 		CSR_WRITE(sc, WMREG_MANC, manc);
   14500 	}
   14501 }
   14502 
   14503 static void
   14504 wm_release_manageability(struct wm_softc *sc)
   14505 {
   14506 
   14507 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14508 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14509 
   14510 		manc |= MANC_ARP_EN;
   14511 		if (sc->sc_type >= WM_T_82571)
   14512 			manc &= ~MANC_EN_MNG2HOST;
   14513 
   14514 		CSR_WRITE(sc, WMREG_MANC, manc);
   14515 	}
   14516 }
   14517 
   14518 static void
   14519 wm_get_wakeup(struct wm_softc *sc)
   14520 {
   14521 
   14522 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14523 	switch (sc->sc_type) {
   14524 	case WM_T_82573:
   14525 	case WM_T_82583:
   14526 		sc->sc_flags |= WM_F_HAS_AMT;
   14527 		/* FALLTHROUGH */
   14528 	case WM_T_80003:
   14529 	case WM_T_82575:
   14530 	case WM_T_82576:
   14531 	case WM_T_82580:
   14532 	case WM_T_I350:
   14533 	case WM_T_I354:
   14534 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14535 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14536 		/* FALLTHROUGH */
   14537 	case WM_T_82541:
   14538 	case WM_T_82541_2:
   14539 	case WM_T_82547:
   14540 	case WM_T_82547_2:
   14541 	case WM_T_82571:
   14542 	case WM_T_82572:
   14543 	case WM_T_82574:
   14544 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14545 		break;
   14546 	case WM_T_ICH8:
   14547 	case WM_T_ICH9:
   14548 	case WM_T_ICH10:
   14549 	case WM_T_PCH:
   14550 	case WM_T_PCH2:
   14551 	case WM_T_PCH_LPT:
   14552 	case WM_T_PCH_SPT:
   14553 	case WM_T_PCH_CNP:
   14554 		sc->sc_flags |= WM_F_HAS_AMT;
   14555 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14556 		break;
   14557 	default:
   14558 		break;
   14559 	}
   14560 
   14561 	/* 1: HAS_MANAGE */
   14562 	if (wm_enable_mng_pass_thru(sc) != 0)
   14563 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14564 
   14565 	/*
	 * Note that the WOL flags are set after the EEPROM settings have
	 * been reset.
   14568 	 */
   14569 }
   14570 
   14571 /*
   14572  * Unconfigure Ultra Low Power mode.
   14573  * Only for I217 and newer (see below).
   14574  */
   14575 static int
   14576 wm_ulp_disable(struct wm_softc *sc)
   14577 {
   14578 	uint32_t reg;
   14579 	uint16_t phyreg;
   14580 	int i = 0, rv = 0;
   14581 
   14582 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14583 		device_xname(sc->sc_dev), __func__));
   14584 	/* Exclude old devices */
   14585 	if ((sc->sc_type < WM_T_PCH_LPT)
   14586 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14587 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14588 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14589 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14590 		return 0;
   14591 
   14592 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14593 		/* Request ME un-configure ULP mode in the PHY */
   14594 		reg = CSR_READ(sc, WMREG_H2ME);
   14595 		reg &= ~H2ME_ULP;
   14596 		reg |= H2ME_ENFORCE_SETTINGS;
   14597 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14598 
   14599 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14600 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14601 			if (i++ == 30) {
   14602 				printf("%s timed out\n", __func__);
   14603 				return -1;
   14604 			}
   14605 			delay(10 * 1000);
   14606 		}
   14607 		reg = CSR_READ(sc, WMREG_H2ME);
   14608 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14609 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14610 
   14611 		return 0;
   14612 	}
   14613 
   14614 	/* Acquire semaphore */
   14615 	rv = sc->phy.acquire(sc);
   14616 	if (rv != 0) {
   14617 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14618 		device_xname(sc->sc_dev), __func__));
   14619 		return -1;
   14620 	}
   14621 
   14622 	/* Toggle LANPHYPC */
   14623 	wm_toggle_lanphypc_pch_lpt(sc);
   14624 
   14625 	/* Unforce SMBus mode in PHY */
   14626 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14627 	if (rv != 0) {
   14628 		uint32_t reg2;
   14629 
   14630 		printf("%s: Force SMBus first.\n", __func__);
   14631 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14632 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14633 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14634 		delay(50 * 1000);
   14635 
   14636 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14637 		    &phyreg);
   14638 		if (rv != 0)
   14639 			goto release;
   14640 	}
   14641 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14642 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14643 
   14644 	/* Unforce SMBus mode in MAC */
   14645 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14646 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14647 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14648 
   14649 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14650 	if (rv != 0)
   14651 		goto release;
   14652 	phyreg |= HV_PM_CTRL_K1_ENA;
   14653 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14654 
   14655 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14656 		&phyreg);
   14657 	if (rv != 0)
   14658 		goto release;
   14659 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14660 	    | I218_ULP_CONFIG1_STICKY_ULP
   14661 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14662 	    | I218_ULP_CONFIG1_WOL_HOST
   14663 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14664 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14665 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14666 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
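	/* Commit the cleared config, then set the START bit to apply it. */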
   14667 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14668 	phyreg |= I218_ULP_CONFIG1_START;
   14669 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14670 
   14671 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14672 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14673 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14674 
   14675 release:
   14676 	/* Release semaphore */
   14677 	sc->phy.release(sc);
   14678 	wm_gmii_reset(sc);
   14679 	delay(50 * 1000);
   14680 
   14681 	return rv;
   14682 }
   14683 
   14684 /* WOL in the newer chipset interfaces (pchlan) */
   14685 static int
   14686 wm_enable_phy_wakeup(struct wm_softc *sc)
   14687 {
   14688 	device_t dev = sc->sc_dev;
   14689 	uint32_t mreg, moff;
   14690 	uint16_t wuce, wuc, wufc, preg;
   14691 	int i, rv;
   14692 
   14693 	KASSERT(sc->sc_type >= WM_T_PCH);
   14694 
   14695 	/* Copy MAC RARs to PHY RARs */
   14696 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14697 
   14698 	/* Activate PHY wakeup */
   14699 	rv = sc->phy.acquire(sc);
   14700 	if (rv != 0) {
   14701 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14702 		    __func__);
   14703 		return rv;
   14704 	}
   14705 
   14706 	/*
   14707 	 * Enable access to PHY wakeup registers.
   14708 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14709 	 */
   14710 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14711 	if (rv != 0) {
   14712 		device_printf(dev,
   14713 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14714 		goto release;
   14715 	}
   14716 
   14717 	/* Copy MAC MTA to PHY MTA */
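	/* Each 32-bit MTA entry is written as two 16-bit PHY registers. */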
   14718 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14719 		uint16_t lo, hi;
   14720 
   14721 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14722 		lo = (uint16_t)(mreg & 0xffff);
   14723 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14724 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14725 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14726 	}
   14727 
   14728 	/* Configure PHY Rx Control register */
   14729 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14730 	mreg = CSR_READ(sc, WMREG_RCTL);
   14731 	if (mreg & RCTL_UPE)
   14732 		preg |= BM_RCTL_UPE;
   14733 	if (mreg & RCTL_MPE)
   14734 		preg |= BM_RCTL_MPE;
   14735 	preg &= ~(BM_RCTL_MO_MASK);
   14736 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14737 	if (moff != 0)
   14738 		preg |= moff << BM_RCTL_MO_SHIFT;
   14739 	if (mreg & RCTL_BAM)
   14740 		preg |= BM_RCTL_BAM;
   14741 	if (mreg & RCTL_PMCF)
   14742 		preg |= BM_RCTL_PMCF;
   14743 	mreg = CSR_READ(sc, WMREG_CTRL);
   14744 	if (mreg & CTRL_RFCE)
   14745 		preg |= BM_RCTL_RFCE;
   14746 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14747 
   14748 	wuc = WUC_APME | WUC_PME_EN;
   14749 	wufc = WUFC_MAG;
   14750 	/* Enable PHY wakeup in MAC register */
   14751 	CSR_WRITE(sc, WMREG_WUC,
   14752 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14753 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14754 
   14755 	/* Configure and enable PHY wakeup in PHY registers */
   14756 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14757 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14758 
   14759 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14760 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14761 
   14762 release:
   14763 	sc->phy.release(sc);
   14764 
	return rv;
   14766 }
   14767 
   14768 /* Power down workaround on D3 */
   14769 static void
   14770 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14771 {
   14772 	uint32_t reg;
   14773 	uint16_t phyreg;
   14774 	int i;
   14775 
   14776 	for (i = 0; i < 2; i++) {
   14777 		/* Disable link */
   14778 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14779 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14780 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14781 
   14782 		/*
   14783 		 * Call gig speed drop workaround on Gig disable before
   14784 		 * accessing any PHY registers
   14785 		 */
   14786 		if (sc->sc_type == WM_T_ICH8)
   14787 			wm_gig_downshift_workaround_ich8lan(sc);
   14788 
   14789 		/* Write VR power-down enable */
   14790 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14791 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14792 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14793 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14794 
   14795 		/* Read it back and test */
   14796 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14797 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14798 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14799 			break;
   14800 
   14801 		/* Issue PHY reset and repeat at most one more time */
   14802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14803 	}
   14804 }
   14805 
   14806 /*
   14807  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14808  *  @sc: pointer to the HW structure
   14809  *
   14810  *  During S0 to Sx transition, it is possible the link remains at gig
   14811  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14812  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14813  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14814  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14815  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   14817  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14818  *  than 10Mbps w/o EEE.
   14819  */
   14820 static void
   14821 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14822 {
   14823 	device_t dev = sc->sc_dev;
   14824 	struct ethercom *ec = &sc->sc_ethercom;
   14825 	uint32_t phy_ctrl;
   14826 	int rv;
   14827 
   14828 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14829 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14830 
   14831 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14832 
   14833 	if (sc->sc_phytype == WMPHY_I217) {
   14834 		uint16_t devid = sc->sc_pcidevid;
   14835 
   14836 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14837 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14838 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14839 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14840 		    (sc->sc_type >= WM_T_PCH_SPT))
   14841 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14842 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14843 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14844 
   14845 		if (sc->phy.acquire(sc) != 0)
   14846 			goto out;
   14847 
   14848 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14849 			uint16_t eee_advert;
   14850 
   14851 			rv = wm_read_emi_reg_locked(dev,
   14852 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14853 			if (rv)
   14854 				goto release;
   14855 
   14856 			/*
   14857 			 * Disable LPLU if both link partners support 100BaseT
   14858 			 * EEE and 100Full is advertised on both ends of the
   14859 			 * link, and enable Auto Enable LPI since there will
   14860 			 * be no driver to enable LPI while in Sx.
   14861 			 */
   14862 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14863 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14864 				uint16_t anar, phy_reg;
   14865 
   14866 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14867 				    &anar);
   14868 				if (anar & ANAR_TX_FD) {
   14869 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14870 					    PHY_CTRL_NOND0A_LPLU);
   14871 
   14872 					/* Set Auto Enable LPI after link up */
   14873 					sc->phy.readreg_locked(dev, 2,
   14874 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14875 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14876 					sc->phy.writereg_locked(dev, 2,
   14877 					    I217_LPI_GPIO_CTRL, phy_reg);
   14878 				}
   14879 			}
   14880 		}
   14881 
   14882 		/*
   14883 		 * For i217 Intel Rapid Start Technology support,
   14884 		 * when the system is going into Sx and no manageability engine
   14885 		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
   14887 		 * on power good, as well as the MTA (Multicast table array).
   14888 		 * The SMBus release must also be disabled on LCD reset.
   14889 		 */
   14890 
   14891 		/*
		 * Enable MTA to reset for Intel Rapid Start Technology
		 * support (XXX not implemented).
   14894 		 */
   14895 
   14896 release:
   14897 		sc->phy.release(sc);
   14898 	}
   14899 out:
   14900 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14901 
   14902 	if (sc->sc_type == WM_T_ICH8)
   14903 		wm_gig_downshift_workaround_ich8lan(sc);
   14904 
   14905 	if (sc->sc_type >= WM_T_PCH) {
   14906 		wm_oem_bits_config_ich8lan(sc, false);
   14907 
   14908 		/* Reset PHY to activate OEM bits on 82577/8 */
   14909 		if (sc->sc_type == WM_T_PCH)
   14910 			wm_reset_phy(sc);
   14911 
   14912 		if (sc->phy.acquire(sc) != 0)
   14913 			return;
   14914 		wm_write_smbus_addr(sc);
   14915 		sc->phy.release(sc);
   14916 	}
   14917 }
   14918 
   14919 /*
   14920  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14921  *  @sc: pointer to the HW structure
   14922  *
   14923  *  During Sx to S0 transitions on non-managed devices or managed devices
   14924  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14925  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14926  *  the PHY.
   14927  *  On i217, setup Intel Rapid Start Technology.
   14928  */
   14929 static int
   14930 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14931 {
   14932 	device_t dev = sc->sc_dev;
   14933 	int rv;
   14934 
   14935 	if (sc->sc_type < WM_T_PCH2)
   14936 		return 0;
   14937 
   14938 	rv = wm_init_phy_workarounds_pchlan(sc);
   14939 	if (rv != 0)
   14940 		return -1;
   14941 
   14942 	/* For i217 Intel Rapid Start Technology support when the system
   14943 	 * is transitioning from Sx and no manageability engine is present
   14944 	 * configure SMBus to restore on reset, disable proxy, and enable
   14945 	 * the reset on MTA (Multicast table array).
   14946 	 */
   14947 	if (sc->sc_phytype == WMPHY_I217) {
   14948 		uint16_t phy_reg;
   14949 
   14950 		if (sc->phy.acquire(sc) != 0)
   14951 			return -1;
   14952 
   14953 		/* Clear Auto Enable LPI after link up */
   14954 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14955 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14956 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14957 
   14958 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14959 			/* Restore clear on SMB if no manageability engine
   14960 			 * is present
   14961 			 */
   14962 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14963 			    &phy_reg);
   14964 			if (rv != 0)
   14965 				goto release;
   14966 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14967 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14968 
   14969 			/* Disable Proxy */
   14970 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14971 		}
   14972 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14974 		if (rv != 0)
   14975 			goto release;
   14976 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14977 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14978 
   14979 release:
   14980 		sc->phy.release(sc);
   14981 		return rv;
   14982 	}
   14983 
   14984 	return 0;
   14985 }
   14986 
   14987 static void
   14988 wm_enable_wakeup(struct wm_softc *sc)
   14989 {
   14990 	uint32_t reg, pmreg;
   14991 	pcireg_t pmode;
   14992 	int rv = 0;
   14993 
   14994 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14995 		device_xname(sc->sc_dev), __func__));
   14996 
   14997 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14998 	    &pmreg, NULL) == 0)
   14999 		return;
   15000 
   15001 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15002 		goto pme;
   15003 
   15004 	/* Advertise the wakeup capability */
   15005 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15006 	    | CTRL_SWDPIN(3));
   15007 
   15008 	/* Keep the laser running on fiber adapters */
   15009 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15010 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15011 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15012 		reg |= CTRL_EXT_SWDPIN(3);
   15013 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15014 	}
   15015 
   15016 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15017 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15018 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15019 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15020 		wm_suspend_workarounds_ich8lan(sc);
   15021 
#if 0	/* For wake on multicast packets */
	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_WUFC, reg);
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif
   15027 
   15028 	if (sc->sc_type >= WM_T_PCH) {
   15029 		rv = wm_enable_phy_wakeup(sc);
   15030 		if (rv != 0)
   15031 			goto pme;
   15032 	} else {
   15033 		/* Enable wakeup by the MAC */
   15034 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15035 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15036 	}
   15037 
   15038 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15039 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15040 		|| (sc->sc_type == WM_T_PCH2))
   15041 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15042 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15043 
   15044 pme:
   15045 	/* Request PME */
   15046 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15047 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15048 		/* For WOL */
   15049 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15050 	} else {
   15051 		/* Disable WOL */
   15052 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15053 	}
   15054 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15055 }
   15056 
   15057 /* Disable ASPM L0s and/or L1 for workaround */
   15058 static void
   15059 wm_disable_aspm(struct wm_softc *sc)
   15060 {
   15061 	pcireg_t reg, mask = 0;
	const char *str = "";
   15063 
	/*
	 * Only for PCIe devices that have the PCIe capability in their
	 * PCI config space.
	 */
   15068 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15069 		return;
   15070 
   15071 	switch (sc->sc_type) {
   15072 	case WM_T_82571:
   15073 	case WM_T_82572:
   15074 		/*
   15075 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15076 		 * State Power management L1 State (ASPM L1).
   15077 		 */
   15078 		mask = PCIE_LCSR_ASPM_L1;
   15079 		str = "L1 is";
   15080 		break;
   15081 	case WM_T_82573:
   15082 	case WM_T_82574:
   15083 	case WM_T_82583:
   15084 		/*
   15085 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15086 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   15091 		 *
   15092 		 * References:
   15093 		 * Errata 8 of the Specification Update of i82573.
   15094 		 * Errata 20 of the Specification Update of i82574.
   15095 		 * Errata 9 of the Specification Update of i82583.
   15096 		 */
   15097 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15098 		str = "L0s and L1 are";
   15099 		break;
   15100 	default:
   15101 		return;
   15102 	}
   15103 
   15104 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15105 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15106 	reg &= ~mask;
   15107 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15108 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15109 
   15110 	/* Print only in wm_attach() */
   15111 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15112 		aprint_verbose_dev(sc->sc_dev,
   15113 		    "ASPM %s disabled to workaround the errata.\n", str);
   15114 }
   15115 
/* LPLU (Low Power Link Up) */
   15117 
   15118 static void
   15119 wm_lplu_d0_disable(struct wm_softc *sc)
   15120 {
   15121 	struct mii_data *mii = &sc->sc_mii;
   15122 	uint32_t reg;
   15123 	uint16_t phyval;
   15124 
   15125 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15126 		device_xname(sc->sc_dev), __func__));
   15127 
   15128 	if (sc->sc_phytype == WMPHY_IFE)
   15129 		return;
   15130 
   15131 	switch (sc->sc_type) {
   15132 	case WM_T_82571:
   15133 	case WM_T_82572:
   15134 	case WM_T_82573:
   15135 	case WM_T_82575:
   15136 	case WM_T_82576:
   15137 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15138 		phyval &= ~PMR_D0_LPLU;
   15139 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15140 		break;
   15141 	case WM_T_82580:
   15142 	case WM_T_I350:
   15143 	case WM_T_I210:
   15144 	case WM_T_I211:
   15145 		reg = CSR_READ(sc, WMREG_PHPM);
   15146 		reg &= ~PHPM_D0A_LPLU;
   15147 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15148 		break;
   15149 	case WM_T_82574:
   15150 	case WM_T_82583:
   15151 	case WM_T_ICH8:
   15152 	case WM_T_ICH9:
   15153 	case WM_T_ICH10:
   15154 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15155 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15156 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15157 		CSR_WRITE_FLUSH(sc);
   15158 		break;
   15159 	case WM_T_PCH:
   15160 	case WM_T_PCH2:
   15161 	case WM_T_PCH_LPT:
   15162 	case WM_T_PCH_SPT:
   15163 	case WM_T_PCH_CNP:
   15164 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15165 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15166 		if (wm_phy_resetisblocked(sc) == false)
   15167 			phyval |= HV_OEM_BITS_ANEGNOW;
   15168 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15169 		break;
   15170 	default:
   15171 		break;
   15172 	}
   15173 }
   15174 
   15175 /* EEE */
   15176 
   15177 static int
   15178 wm_set_eee_i350(struct wm_softc *sc)
   15179 {
   15180 	struct ethercom *ec = &sc->sc_ethercom;
   15181 	uint32_t ipcnfg, eeer;
   15182 	uint32_t ipcnfg_mask
   15183 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15184 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15185 
   15186 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15187 
   15188 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15189 	eeer = CSR_READ(sc, WMREG_EEER);
   15190 
	/* Enable or disable per user setting */
   15192 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15193 		ipcnfg |= ipcnfg_mask;
   15194 		eeer |= eeer_mask;
   15195 	} else {
   15196 		ipcnfg &= ~ipcnfg_mask;
   15197 		eeer &= ~eeer_mask;
   15198 	}
   15199 
   15200 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15201 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15202 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15203 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15204 
   15205 	return 0;
   15206 }
   15207 
   15208 static int
   15209 wm_set_eee_pchlan(struct wm_softc *sc)
   15210 {
   15211 	device_t dev = sc->sc_dev;
   15212 	struct ethercom *ec = &sc->sc_ethercom;
   15213 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15214 	int rv = 0;
   15215 
   15216 	switch (sc->sc_phytype) {
   15217 	case WMPHY_82579:
   15218 		lpa = I82579_EEE_LP_ABILITY;
   15219 		pcs_status = I82579_EEE_PCS_STATUS;
   15220 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15221 		break;
   15222 	case WMPHY_I217:
   15223 		lpa = I217_EEE_LP_ABILITY;
   15224 		pcs_status = I217_EEE_PCS_STATUS;
   15225 		adv_addr = I217_EEE_ADVERTISEMENT;
   15226 		break;
   15227 	default:
   15228 		return 0;
   15229 	}
   15230 
   15231 	if (sc->phy.acquire(sc)) {
   15232 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15233 		return 0;
   15234 	}
   15235 
   15236 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15237 	if (rv != 0)
   15238 		goto release;
   15239 
   15240 	/* Clear bits that enable EEE in various speeds */
   15241 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15242 
   15243 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15244 		/* Save off link partner's EEE ability */
   15245 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15246 		if (rv != 0)
   15247 			goto release;
   15248 
   15249 		/* Read EEE advertisement */
   15250 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15251 			goto release;
   15252 
   15253 		/*
   15254 		 * Enable EEE only for speeds in which the link partner is
   15255 		 * EEE capable and for which we advertise EEE.
   15256 		 */
   15257 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15258 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15259 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15260 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15261 			if ((data & ANLPAR_TX_FD) != 0)
   15262 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15263 			else {
   15264 				/*
   15265 				 * EEE is not supported in 100Half, so ignore
   15266 				 * partner's EEE in 100 ability if full-duplex
   15267 				 * is not advertised.
   15268 				 */
   15269 				sc->eee_lp_ability
   15270 				    &= ~AN_EEEADVERT_100_TX;
   15271 			}
   15272 		}
   15273 	}
   15274 
   15275 	if (sc->sc_phytype == WMPHY_82579) {
   15276 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15277 		if (rv != 0)
   15278 			goto release;
   15279 
   15280 		data &= ~I82579_LPI_PLL_SHUT_100;
   15281 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15282 	}
   15283 
   15284 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15285 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15286 		goto release;
   15287 
   15288 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15289 release:
   15290 	sc->phy.release(sc);
   15291 
   15292 	return rv;
   15293 }
   15294 
   15295 static int
   15296 wm_set_eee(struct wm_softc *sc)
   15297 {
   15298 	struct ethercom *ec = &sc->sc_ethercom;
   15299 
   15300 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15301 		return 0;
   15302 
   15303 	if (sc->sc_type == WM_T_I354) {
   15304 		/* I354 uses an external PHY */
   15305 		return 0; /* not yet */
   15306 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15307 		return wm_set_eee_i350(sc);
   15308 	else if (sc->sc_type >= WM_T_PCH2)
   15309 		return wm_set_eee_pchlan(sc);
   15310 
   15311 	return 0;
   15312 }
   15313 
   15314 /*
   15315  * Workarounds (mainly PHY related).
   15316  * Basically, PHY's workarounds are in the PHY drivers.
   15317  */
   15318 
   15319 /* Work-around for 82566 Kumeran PCS lock loss */
   15320 static int
   15321 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15322 {
   15323 	struct mii_data *mii = &sc->sc_mii;
   15324 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15325 	int i, reg, rv;
   15326 	uint16_t phyreg;
   15327 
   15328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15329 		device_xname(sc->sc_dev), __func__));
   15330 
   15331 	/* If the link is not up, do nothing */
   15332 	if ((status & STATUS_LU) == 0)
   15333 		return 0;
   15334 
   15335 	/* Nothing to do if the link is other than 1Gbps */
   15336 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15337 		return 0;
   15338 
   15339 	for (i = 0; i < 10; i++) {
   15340 		/* read twice */
   15341 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15342 		if (rv != 0)
   15343 			return rv;
   15344 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15345 		if (rv != 0)
   15346 			return rv;
   15347 
   15348 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15349 			goto out;	/* GOOD! */
   15350 
   15351 		/* Reset the PHY */
   15352 		wm_reset_phy(sc);
   15353 		delay(5*1000);
   15354 	}
   15355 
   15356 	/* Disable GigE link negotiation */
   15357 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15358 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15359 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15360 
   15361 	/*
   15362 	 * Call gig speed drop workaround on Gig disable before accessing
   15363 	 * any PHY registers.
   15364 	 */
   15365 	wm_gig_downshift_workaround_ich8lan(sc);
   15366 
   15367 out:
   15368 	return 0;
   15369 }
   15370 
   15371 /*
   15372  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15373  *  @sc: pointer to the HW structure
   15374  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15376  *  LPLU, Gig disable, MDIC PHY reset):
   15377  *    1) Set Kumeran Near-end loopback
   15378  *    2) Clear Kumeran Near-end loopback
   15379  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15380  */
   15381 static void
   15382 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15383 {
   15384 	uint16_t kmreg;
   15385 
   15386 	/* Only for igp3 */
   15387 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15388 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15389 			return;
   15390 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15391 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15392 			return;
   15393 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15394 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15395 	}
   15396 }
   15397 
   15398 /*
   15399  * Workaround for pch's PHYs
   15400  * XXX should be moved to new PHY driver?
   15401  */
   15402 static int
   15403 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15404 {
   15405 	device_t dev = sc->sc_dev;
   15406 	struct mii_data *mii = &sc->sc_mii;
   15407 	struct mii_softc *child;
   15408 	uint16_t phy_data, phyrev = 0;
   15409 	int phytype = sc->sc_phytype;
   15410 	int rv;
   15411 
   15412 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15413 		device_xname(dev), __func__));
   15414 	KASSERT(sc->sc_type == WM_T_PCH);
   15415 
   15416 	/* Set MDIO slow mode before any other MDIO access */
   15417 	if (phytype == WMPHY_82577)
   15418 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15419 			return rv;
   15420 
   15421 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15422 	if (child != NULL)
   15423 		phyrev = child->mii_mpd_rev;
   15424 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15426 	if ((child != NULL) &&
   15427 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15428 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15429 		/* Disable generation of early preamble (0x4431) */
   15430 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15431 		    &phy_data);
   15432 		if (rv != 0)
   15433 			return rv;
   15434 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15435 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15436 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15437 		    phy_data);
   15438 		if (rv != 0)
   15439 			return rv;
   15440 
   15441 		/* Preamble tuning for SSC */
   15442 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15443 		if (rv != 0)
   15444 			return rv;
   15445 	}
   15446 
   15447 	/* 82578 */
   15448 	if (phytype == WMPHY_82578) {
   15449 		/*
   15450 		 * Return registers to default by doing a soft reset then
   15451 		 * writing 0x3140 to the control register
   15452 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15453 		 */
   15454 		if ((child != NULL) && (phyrev < 2)) {
   15455 			PHY_RESET(child);
   15456 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15457 			    0x3140);
   15458 			if (rv != 0)
   15459 				return rv;
   15460 		}
   15461 	}
   15462 
   15463 	/* Select page 0 */
   15464 	if ((rv = sc->phy.acquire(sc)) != 0)
   15465 		return rv;
   15466 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15467 	sc->phy.release(sc);
   15468 	if (rv != 0)
   15469 		return rv;
   15470 
   15471 	/*
   15472 	 * Configure the K1 Si workaround during phy reset assuming there is
   15473 	 * link so that it disables K1 if link is in 1Gbps.
   15474 	 */
   15475 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15476 		return rv;
   15477 
   15478 	/* Workaround for link disconnects on a busy hub in half duplex */
   15479 	rv = sc->phy.acquire(sc);
   15480 	if (rv)
   15481 		return rv;
   15482 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15483 	if (rv)
   15484 		goto release;
   15485 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15486 	    phy_data & 0x00ff);
   15487 	if (rv)
   15488 		goto release;
   15489 
	/* Set MSE higher so the link stays up when noise is high */
   15491 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15492 release:
   15493 	sc->phy.release(sc);
   15494 
	return rv;
}
   15499 
   15500 /*
   15501  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15502  *  @sc:   pointer to the HW structure
   15503  */
   15504 static void
   15505 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15506 {
   15507 	device_t dev = sc->sc_dev;
   15508 	uint32_t mac_reg;
   15509 	uint16_t i, wuce;
   15510 	int count;
   15511 
   15512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15513 		device_xname(sc->sc_dev), __func__));
   15514 
   15515 	if (sc->phy.acquire(sc) != 0)
   15516 		return;
   15517 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15518 		goto release;
   15519 
   15520 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15521 	count = wm_rar_count(sc);
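	/*
	 * Each 32-bit RAL/RAH value is split into two 16-bit PHY wakeup
	 * registers; for RAH, only the Address Valid bit is carried over
	 * into BM_RAR_CTRL.
	 */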
   15522 	for (i = 0; i < count; i++) {
   15523 		uint16_t lo, hi;
   15524 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15525 		lo = (uint16_t)(mac_reg & 0xffff);
   15526 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15527 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15528 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15529 
   15530 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15531 		lo = (uint16_t)(mac_reg & 0xffff);
   15532 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15533 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15534 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15535 	}
   15536 
   15537 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15538 
   15539 release:
   15540 	sc->phy.release(sc);
   15541 }
   15542 
   15543 /*
   15544  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15545  *  done after every PHY reset.
   15546  */
   15547 static int
   15548 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15549 {
   15550 	device_t dev = sc->sc_dev;
   15551 	int rv;
   15552 
   15553 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15554 		device_xname(dev), __func__));
   15555 	KASSERT(sc->sc_type == WM_T_PCH2);
   15556 
   15557 	/* Set MDIO slow mode before any other MDIO access */
   15558 	rv = wm_set_mdio_slow_mode_hv(sc);
   15559 	if (rv != 0)
   15560 		return rv;
   15561 
   15562 	rv = sc->phy.acquire(sc);
   15563 	if (rv != 0)
   15564 		return rv;
	/* Set MSE higher so the link stays up when noise is high */
   15566 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15567 	if (rv != 0)
   15568 		goto release;
	/* Drop the link after the MSE threshold is reached 5 times */
   15570 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15571 release:
   15572 	sc->phy.release(sc);
   15573 
   15574 	return rv;
   15575 }
   15576 
   15577 /**
   15578  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15579  *  @link: link up bool flag
   15580  *
   15581  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15582  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15584  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15585  *  speeds in order to avoid Tx hangs.
   15586  **/
   15587 static int
   15588 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15589 {
   15590 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15591 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15592 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15593 	uint16_t phyreg;
   15594 
	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);

		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15599 		if (rv != 0)
   15600 			goto release;
   15601 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15602 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15603 		if (rv != 0)
   15604 			goto release;
   15605 		delay(20);
   15606 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15607 
   15608 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15609 		    &phyreg);
   15610 release:
   15611 		sc->phy.release(sc);
   15612 		return rv;
   15613 	}
   15614 
   15615 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15616 
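	/*
	 * PHY revisions newer than 5, link-down, and 100Full need no inband
	 * timeout tuning; only FEXTNVM6 is updated below.
	 */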
   15617 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15618 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15619 	    || !link
   15620 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15621 		goto update_fextnvm6;
   15622 
   15623 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15624 
   15625 	/* Clear link status transmit timeout */
   15626 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15627 	if (speed == STATUS_SPEED_100) {
   15628 		/* Set inband Tx timeout to 5x10us for 100Half */
   15629 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15630 
   15631 		/* Do not extend the K1 entry latency for 100Half */
   15632 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15633 	} else {
   15634 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15635 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15636 
   15637 		/* Extend the K1 entry latency for 10 Mbps */
   15638 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15639 	}
   15640 
   15641 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15642 
   15643 update_fextnvm6:
   15644 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15645 	return 0;
   15646 }
   15647 
   15648 /*
   15649  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15650  *  @sc:   pointer to the HW structure
   15651  *  @link: link up bool flag
   15652  *
   15653  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15655  *  If link is down, the function will restore the default K1 setting located
   15656  *  in the NVM.
   15657  */
   15658 static int
   15659 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15660 {
   15661 	int k1_enable = sc->sc_nvm_k1_enabled;
   15662 
   15663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15664 		device_xname(sc->sc_dev), __func__));
   15665 
   15666 	if (sc->phy.acquire(sc) != 0)
   15667 		return -1;
   15668 
   15669 	if (link) {
   15670 		k1_enable = 0;
   15671 
   15672 		/* Link stall fix for link up */
   15673 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15674 		    0x0100);
   15675 	} else {
   15676 		/* Link stall fix for link down */
   15677 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15678 		    0x4100);
   15679 	}
   15680 
   15681 	wm_configure_k1_ich8lan(sc, k1_enable);
   15682 	sc->phy.release(sc);
   15683 
   15684 	return 0;
   15685 }
   15686 
   15687 /*
   15688  *  wm_k1_workaround_lv - K1 Si workaround
   15689  *  @sc:   pointer to the HW structure
   15690  *
   15691  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15692  *  Disable K1 for 1000 and 100 speeds
   15693  */
   15694 static int
   15695 wm_k1_workaround_lv(struct wm_softc *sc)
   15696 {
   15697 	uint32_t reg;
   15698 	uint16_t phyreg;
   15699 	int rv;
   15700 
   15701 	if (sc->sc_type != WM_T_PCH2)
   15702 		return 0;
   15703 
   15704 	/* Set K1 beacon duration based on 10Mbps speed */
   15705 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15706 	if (rv != 0)
   15707 		return rv;
   15708 
   15709 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15710 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15711 		if (phyreg &
   15712 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   15714 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15715 			    &phyreg);
   15716 			if (rv != 0)
   15717 				return rv;
   15718 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15719 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15720 			    phyreg);
   15721 			if (rv != 0)
   15722 				return rv;
   15723 		} else {
   15724 			/* For 10Mbps */
   15725 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15726 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15727 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15728 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15729 		}
   15730 	}
   15731 
   15732 	return 0;
   15733 }
   15734 
   15735 /*
   15736  *  wm_link_stall_workaround_hv - Si workaround
   15737  *  @sc: pointer to the HW structure
   15738  *
   15739  *  This function works around a Si bug where the link partner can get
   15740  *  a link up indication before the PHY does. If small packets are sent
   15741  *  by the link partner they can be placed in the packet buffer without
   15742  *  being properly accounted for by the PHY and will stall preventing
   15743  *  further packets from being received.  The workaround is to clear the
   15744  *  packet buffer after the PHY detects link up.
   15745  */
   15746 static int
   15747 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15748 {
   15749 	uint16_t phyreg;
   15750 
   15751 	if (sc->sc_phytype != WMPHY_82578)
   15752 		return 0;
   15753 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15755 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15756 	if ((phyreg & BMCR_LOOP) != 0)
   15757 		return 0;
   15758 
	/* Check whether the link is up at 1Gbps */
   15760 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15761 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15762 	    | BM_CS_STATUS_SPEED_MASK;
   15763 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15764 		| BM_CS_STATUS_SPEED_1000))
   15765 		return 0;
   15766 
   15767 	delay(200 * 1000);	/* XXX too big */
   15768 
	/* Flush the packets in the FIFO buffer */
   15770 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15771 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15772 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15773 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15774 
   15775 	return 0;
   15776 }
   15777 
   15778 static int
   15779 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15780 {
   15781 	int rv;
   15782 	uint16_t reg;
   15783 
   15784 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15785 	if (rv != 0)
   15786 		return rv;
   15787 
	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15789 	    reg | HV_KMRN_MDIO_SLOW);
   15790 }
   15791 
   15792 /*
   15793  *  wm_configure_k1_ich8lan - Configure K1 power state
   15794  *  @sc: pointer to the HW structure
   15795  *  @enable: K1 state to configure
   15796  *
   15797  *  Configure the K1 power state based on the provided parameter.
   15798  *  Assumes semaphore already acquired.
   15799  */
   15800 static void
   15801 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15802 {
   15803 	uint32_t ctrl, ctrl_ext, tmp;
   15804 	uint16_t kmreg;
   15805 	int rv;
   15806 
   15807 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15808 
   15809 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15810 	if (rv != 0)
   15811 		return;
   15812 
   15813 	if (k1_enable)
   15814 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15815 	else
   15816 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15817 
   15818 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15819 	if (rv != 0)
   15820 		return;
   15821 
   15822 	delay(20);
   15823 
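	/*
	 * Briefly force the MAC speed with the speed-select bypass set and
	 * then restore CTRL/CTRL_EXT; presumably this makes the MAC/Kumeran
	 * interface latch the new K1 setting.
	 */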
   15824 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15825 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15826 
   15827 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15828 	tmp |= CTRL_FRCSPD;
   15829 
   15830 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15831 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15832 	CSR_WRITE_FLUSH(sc);
   15833 	delay(20);
   15834 
   15835 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15836 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15837 	CSR_WRITE_FLUSH(sc);
   15838 	delay(20);
   15841 }
   15842 
/* Special case for the 82575: manual init is needed after reset */
   15844 static void
   15845 wm_reset_init_script_82575(struct wm_softc *sc)
   15846 {
   15847 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15850 	 */
   15851 
   15852 	/* SerDes configuration via SERDESCTRL */
   15853 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15857 
   15858 	/* CCM configuration via CCMCTL register */
   15859 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15861 
   15862 	/* PCIe lanes configuration */
   15863 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15864 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15867 
   15868 	/* PCIe PLL Configuration */
   15869 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15872 }
   15873 
   15874 static void
   15875 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15876 {
   15877 	uint32_t reg;
   15878 	uint16_t nvmword;
   15879 	int rv;
   15880 
   15881 	if (sc->sc_type != WM_T_82580)
   15882 		return;
   15883 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15884 		return;
   15885 
   15886 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15887 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15888 	if (rv != 0) {
   15889 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15890 		    __func__);
   15891 		return;
   15892 	}
   15893 
   15894 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15895 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15896 		reg |= MDICNFG_DEST;
   15897 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15898 		reg |= MDICNFG_COM_MDIO;
   15899 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15900 }
   15901 
   15902 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15903 
   15904 static bool
   15905 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15906 {
   15907 	uint32_t reg;
   15908 	uint16_t id1, id2;
   15909 	int i, rv;
   15910 
   15911 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15912 		device_xname(sc->sc_dev), __func__));
   15913 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15914 
   15915 	id1 = id2 = 0xffff;
   15916 	for (i = 0; i < 2; i++) {
   15917 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15918 		    &id1);
   15919 		if ((rv != 0) || MII_INVALIDID(id1))
   15920 			continue;
   15921 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15922 		    &id2);
   15923 		if ((rv != 0) || MII_INVALIDID(id2))
   15924 			continue;
   15925 		break;
   15926 	}
   15927 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15928 		goto out;
   15929 
   15930 	/*
   15931 	 * In case the PHY needs to be in mdio slow mode,
   15932 	 * set slow mode and try to get the PHY id again.
   15933 	 */
   15934 	rv = 0;
   15935 	if (sc->sc_type < WM_T_PCH_LPT) {
   15936 		sc->phy.release(sc);
   15937 		wm_set_mdio_slow_mode_hv(sc);
   15938 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15939 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15940 		sc->phy.acquire(sc);
   15941 	}
   15942 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15943 		printf("XXX return with false\n");
   15944 		return false;
   15945 	}
   15946 out:
   15947 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15948 		/* Only unforce SMBus if ME is not active */
   15949 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15950 			uint16_t phyreg;
   15951 
   15952 			/* Unforce SMBus mode in PHY */
   15953 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15954 			    CV_SMB_CTRL, &phyreg);
   15955 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15956 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15957 			    CV_SMB_CTRL, phyreg);
   15958 
   15959 			/* Unforce SMBus mode in MAC */
   15960 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15961 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15962 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15963 		}
   15964 	}
   15965 	return true;
   15966 }
   15967 
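/*
 * Toggle the LANPHYPC pin value to power cycle the PHY (see also
 * wm_resume_workarounds_pchlan()).  The PHY config counter is shortened
 * to 50msec first, presumably so the PHY reloads its configuration
 * promptly after the cycle.
 */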
   15968 static void
   15969 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15970 {
   15971 	uint32_t reg;
   15972 	int i;
   15973 
   15974 	/* Set PHY Config Counter to 50msec */
   15975 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15976 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15977 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15978 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15979 
   15980 	/* Toggle LANPHYPC */
   15981 	reg = CSR_READ(sc, WMREG_CTRL);
   15982 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15983 	reg &= ~CTRL_LANPHYPC_VALUE;
   15984 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15985 	CSR_WRITE_FLUSH(sc);
   15986 	delay(1000);
   15987 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15988 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15989 	CSR_WRITE_FLUSH(sc);
   15990 
   15991 	if (sc->sc_type < WM_T_PCH_LPT)
   15992 		delay(50 * 1000);
   15993 	else {
   15994 		i = 20;
   15995 
   15996 		do {
   15997 			delay(5 * 1000);
   15998 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15999 		    && i--);
   16000 
   16001 		delay(30 * 1000);
   16002 	}
   16003 }
   16004 
   16005 static int
   16006 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16007 {
   16008 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16009 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16010 	uint32_t rxa;
   16011 	uint16_t scale = 0, lat_enc = 0;
   16012 	int32_t obff_hwm = 0;
   16013 	int64_t lat_ns, value;
   16014 
   16015 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16016 		device_xname(sc->sc_dev), __func__));
   16017 
   16018 	if (link) {
   16019 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16020 		uint32_t status;
   16021 		uint16_t speed;
   16022 		pcireg_t preg;
   16023 
   16024 		status = CSR_READ(sc, WMREG_STATUS);
   16025 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16026 		case STATUS_SPEED_10:
   16027 			speed = 10;
   16028 			break;
   16029 		case STATUS_SPEED_100:
   16030 			speed = 100;
   16031 			break;
   16032 		case STATUS_SPEED_1000:
   16033 			speed = 1000;
   16034 			break;
   16035 		default:
   16036 			device_printf(sc->sc_dev, "Unknown speed "
   16037 			    "(status = %08x)\n", status);
   16038 			return -1;
   16039 		}
   16040 
   16041 		/* Rx Packet Buffer Allocation size (KB) */
   16042 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16043 
   16044 		/*
   16045 		 * Determine the maximum latency tolerated by the device.
   16046 		 *
   16047 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16048 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16049 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16050 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16051 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16052 		 */
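		/*
		 * Illustrative arithmetic: a tolerated latency of
		 * 1,000,000 ns leaves the loop below with scale = 2 and
		 * value = 977, i.e. 977 * 2^10 ns, which rounds the
		 * encoded latency up to 1,000,448 ns.
		 */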
   16053 		lat_ns = ((int64_t)rxa * 1024 -
   16054 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16055 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16056 		if (lat_ns < 0)
   16057 			lat_ns = 0;
   16058 		else
   16059 			lat_ns /= speed;
   16060 		value = lat_ns;
   16061 
   16062 		while (value > LTRV_VALUE) {
			scale++;
   16064 			value = howmany(value, __BIT(5));
   16065 		}
   16066 		if (scale > LTRV_SCALE_MAX) {
   16067 			printf("%s: Invalid LTR latency scale %d\n",
   16068 			    device_xname(sc->sc_dev), scale);
   16069 			return -1;
   16070 		}
   16071 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16072 
   16073 		/* Determine the maximum latency tolerated by the platform */
   16074 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16075 		    WM_PCI_LTR_CAP_LPT);
   16076 		max_snoop = preg & 0xffff;
   16077 		max_nosnoop = preg >> 16;
   16078 
   16079 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16080 
   16081 		if (lat_enc > max_ltr_enc) {
   16082 			lat_enc = max_ltr_enc;
   16083 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16084 			    * PCI_LTR_SCALETONS(
   16085 				    __SHIFTOUT(lat_enc,
   16086 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16087 		}
   16088 
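		/*
		 * Convert the tolerated latency back into the amount of Rx
		 * buffer (in KB) that fills during that time at the current
		 * link speed; the OBFF high water mark is whatever remains
		 * of the Rx allocation.
		 */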
   16089 		if (lat_ns) {
   16090 			lat_ns *= speed * 1000;
   16091 			lat_ns /= 8;
   16092 			lat_ns /= 1000000000;
   16093 			obff_hwm = (int32_t)(rxa - lat_ns);
   16094 		}
   16095 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16097 			    "(rxa = %d, lat_ns = %d)\n",
   16098 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16099 			return -1;
   16100 		}
   16101 	}
	/* Snoop and No-Snoop latencies are set the same */
   16103 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16104 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16105 
   16106 	/* Set OBFF high water mark */
   16107 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16108 	reg |= obff_hwm;
   16109 	CSR_WRITE(sc, WMREG_SVT, reg);
   16110 
   16111 	/* Enable OBFF */
   16112 	reg = CSR_READ(sc, WMREG_SVCR);
   16113 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16114 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16115 
   16116 	return 0;
   16117 }
   16118 
   16119 /*
   16120  * I210 Errata 25 and I211 Errata 10
   16121  * Slow System Clock.
   16122  */
   16123 static int
   16124 wm_pll_workaround_i210(struct wm_softc *sc)
   16125 {
   16126 	uint32_t mdicnfg, wuc;
   16127 	uint32_t reg;
   16128 	pcireg_t pcireg;
   16129 	uint32_t pmreg;
   16130 	uint16_t nvmword, tmp_nvmword;
   16131 	uint16_t phyval;
   16132 	bool wa_done = false;
   16133 	int i, rv = 0;
   16134 
   16135 	/* Get Power Management cap offset */
   16136 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16137 	    &pmreg, NULL) == 0)
   16138 		return -1;
   16139 
   16140 	/* Save WUC and MDICNFG registers */
   16141 	wuc = CSR_READ(sc, WMREG_WUC);
   16142 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16143 
   16144 	reg = mdicnfg & ~MDICNFG_DEST;
   16145 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16146 
   16147 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16148 		nvmword = INVM_DEFAULT_AL;
   16149 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16150 
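	/*
	 * Each pass below checks whether the PHY PLL came up configured;
	 * if not, it resets the internal PHY, rewrites the iNVM autoload
	 * word with the PLL workaround bit set, and bounces the device
	 * through D3hot/D0 to retrigger the autoload.
	 */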
   16151 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16152 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16153 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16154 
   16155 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16156 			rv = 0;
   16157 			break; /* OK */
   16158 		} else
   16159 			rv = -1;
   16160 
   16161 		wa_done = true;
   16162 		/* Directly reset the internal PHY */
   16163 		reg = CSR_READ(sc, WMREG_CTRL);
   16164 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16165 
   16166 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16167 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16168 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16169 
   16170 		CSR_WRITE(sc, WMREG_WUC, 0);
   16171 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16172 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16173 
   16174 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16175 		    pmreg + PCI_PMCSR);
   16176 		pcireg |= PCI_PMCSR_STATE_D3;
   16177 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16178 		    pmreg + PCI_PMCSR, pcireg);
   16179 		delay(1000);
   16180 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16181 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16182 		    pmreg + PCI_PMCSR, pcireg);
   16183 
   16184 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16185 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16186 
   16187 		/* Restore WUC register */
   16188 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16189 	}
   16190 
   16191 	/* Restore MDICNFG setting */
   16192 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16193 	if (wa_done)
   16194 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16195 	return rv;
   16196 }
   16197 
   16198 static void
   16199 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16200 {
   16201 	uint32_t reg;
   16202 
   16203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16204 		device_xname(sc->sc_dev), __func__));
   16205 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16206 	    || (sc->sc_type == WM_T_PCH_CNP));
   16207 
   16208 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16209 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16210 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16211 
   16212 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16213 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16214 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16215 }
   16216