/*	$NetBSD: if_wm.c,v 1.627 2019/02/21 08:10:22 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.627 2019/02/21 08:10:22 knakahara Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
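
/*
 * Illustrative only (hypothetical call site, not from this file): with
 * WM_DEBUG defined, a category bit selects which messages fire, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The second argument is a fully parenthesized printf() argument list,
 * which is why call sites carry the extra parentheses.
 */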

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts supported by this driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
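
/*
 * A minimal sketch (not compiled into the driver) of how the power-of-two
 * masks above give cheap ring wraparound: advancing past the last
 * descriptor returns to slot 0 with a single AND, no division or branch.
 */
#if 0
	int idx = WM_NTXDESC(txq) - 1;	/* last slot, e.g. 4095 on >= 82544 */
	idx = WM_NEXTTX(txq, idx);	/* (4095 + 1) & 4095 == 0 */
#endif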

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
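
/*
 * Worked example for the sizing comment above (assuming a common 9018-byte
 * jumbo frame): ceil(9018 / 2048) = 5 MCLBYTES buffers per packet, and
 * 256 / 5 = 51.2, i.e. roughly 50 jumbo packets fit in the ring.
 */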

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
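
/*
 * For illustration: the instantiation WM_Q_EVCNT_DEFINE(txq, txdw) used in
 * struct wm_txqueue below pastes tokens to produce roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is not macro-expanded; it merely sizes the buffer),
 * and WM_Q_EVCNT_ATTACH() then snprintf()s a name such as "txq00txdw"
 * into that buffer before passing it to evcnt_attach_dynamic().
 */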

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only
					 * MSI-X uses sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
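
/*
 * A non-compiled sketch of the core-lock pattern: the macros above are
 * no-ops when sc_core_lock is NULL, so callers may use them unconditionally.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif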

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
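
/*
 * A non-compiled sketch of the tail-pointer idiom above: rxq_tailp always
 * points at the m_next slot to fill next, so appending an mbuf is O(1)
 * with no special case for an empty chain.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2, tailp = &m2->m_next */
#endif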

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
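
/*
 * Worked example (hypothetical address): for a 64-bit bus_addr_t of
 * 0x0000000123456000, WM_CDTXADDR_LO() yields 0x23456000 and
 * WM_CDTXADDR_HI() yields 0x1, the two 32-bit halves the descriptor
 * base registers expect; with a 32-bit bus_addr_t the HI half is 0.
 */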

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1321 	  "82567LM-3 LAN Controller",
   1322 	  WM_T_ICH10,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1324 	  "82567LF-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1327 	  "82567V-2 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1330 	  "82567V-3? LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1333 	  "HANKSVILLE LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1336 	  "PCH LAN (82577LM) Controller",
   1337 	  WM_T_PCH,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1339 	  "PCH LAN (82577LC) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1342 	  "PCH LAN (82578DM) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1345 	  "PCH LAN (82578DC) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1348 	  "PCH2 LAN (82579LM) Controller",
   1349 	  WM_T_PCH2,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1351 	  "PCH2 LAN (82579V) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1354 	  "82575EB dual-1000baseT Ethernet",
   1355 	  WM_T_82575,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1357 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1358 	  WM_T_82575,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1360 	  "82575GB quad-1000baseT Ethernet",
   1361 	  WM_T_82575,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1363 	  "82575GB quad-1000baseT Ethernet (PM)",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1366 	  "82576 1000BaseT Ethernet",
   1367 	  WM_T_82576,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1369 	  "82576 1000BaseX Ethernet",
   1370 	  WM_T_82576,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1373 	  "82576 gigabit Ethernet (SERDES)",
   1374 	  WM_T_82576,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1377 	  "82576 quad-1000BaseT Ethernet",
   1378 	  WM_T_82576,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1381 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1382 	  WM_T_82576,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1385 	  "82576 gigabit Ethernet",
   1386 	  WM_T_82576,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1389 	  "82576 gigabit Ethernet (SERDES)",
   1390 	  WM_T_82576,		WMP_F_SERDES },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1392 	  "82576 quad-gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1396 	  "82580 1000BaseT Ethernet",
   1397 	  WM_T_82580,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1399 	  "82580 1000BaseX Ethernet",
   1400 	  WM_T_82580,		WMP_F_FIBER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1403 	  "82580 1000BaseT Ethernet (SERDES)",
   1404 	  WM_T_82580,		WMP_F_SERDES },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1407 	  "82580 gigabit Ethernet (SGMII)",
   1408 	  WM_T_82580,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1410 	  "82580 dual-1000BaseT Ethernet",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1414 	  "82580 quad-1000BaseX Ethernet",
   1415 	  WM_T_82580,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1418 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1419 	  WM_T_82580,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1422 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1423 	  WM_T_82580,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1426 	  "DH89XXCC 1000BASE-KX Ethernet",
   1427 	  WM_T_82580,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1430 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1431 	  WM_T_82580,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1434 	  "I350 Gigabit Network Connection",
   1435 	  WM_T_I350,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1438 	  "I350 Gigabit Fiber Network Connection",
   1439 	  WM_T_I350,		WMP_F_FIBER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1442 	  "I350 Gigabit Backplane Connection",
   1443 	  WM_T_I350,		WMP_F_SERDES },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1446 	  "I350 Quad Port Gigabit Ethernet",
   1447 	  WM_T_I350,		WMP_F_SERDES },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1450 	  "I350 Gigabit Connection",
   1451 	  WM_T_I350,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1454 	  "I354 Gigabit Ethernet (KX)",
   1455 	  WM_T_I354,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1458 	  "I354 Gigabit Ethernet (SGMII)",
   1459 	  WM_T_I354,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1462 	  "I354 Gigabit Ethernet (2.5G)",
   1463 	  WM_T_I354,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1466 	  "I210-T1 Ethernet Server Adapter",
   1467 	  WM_T_I210,		WMP_F_COPPER },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1470 	  "I210 Ethernet (Copper OEM)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1474 	  "I210 Ethernet (Copper IT)",
   1475 	  WM_T_I210,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1478 	  "I210 Ethernet (Copper, FLASH less)",
   1479 	  WM_T_I210,		WMP_F_COPPER },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1482 	  "I210 Gigabit Ethernet (Fiber)",
   1483 	  WM_T_I210,		WMP_F_FIBER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1486 	  "I210 Gigabit Ethernet (SERDES)",
   1487 	  WM_T_I210,		WMP_F_SERDES },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1490 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1491 	  WM_T_I210,		WMP_F_SERDES },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1494 	  "I210 Gigabit Ethernet (SGMII)",
   1495 	  WM_T_I210,		WMP_F_COPPER },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1498 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1499 	  WM_T_I210,		WMP_F_COPPER },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1502 	  "I211 Ethernet (COPPER)",
   1503 	  WM_T_I211,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1505 	  "I217 V Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1508 	  "I217 LM Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1538 	  "I219 V Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1567 	{ 0,			0,
   1568 	  NULL,
   1569 	  0,			0 },
   1570 };
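
         /*
          * The all-zero entry above is a sentinel: wm_lookup() scans this
          * table and stops at the first entry whose wmp_name is NULL.
          */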
   1571 
   1572 /*
   1573  * Register read/write functions.
   1574  * Other than CSR_{READ|WRITE}().
   1575  */
   1576 
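         /*
          * The I/O BAR provides a two-register indirection window: the
          * register offset is written at offset 0 (the address register)
          * and the value is then read or written at offset 4 (the data
          * register), as wm_io_read()/wm_io_write() below do.
          */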
   1577 #if 0 /* Not currently used */
   1578 static inline uint32_t
   1579 wm_io_read(struct wm_softc *sc, int reg)
   1580 {
   1581 
   1582 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1583 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1584 }
   1585 #endif
   1586 
   1587 static inline void
   1588 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1589 {
   1590 
   1591 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1592 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1593 }
   1594 
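         /*
          * Write an 8-bit value to an indirectly addressed controller
          * register on 82575-class chips: the data and target offset are
          * combined into a single register write, after which the READY
          * bit is polled (up to SCTL_CTL_POLL_TIMEOUT polls, 5us apart)
          * until the hardware has latched the value.
          */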
   1595 static inline void
   1596 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1597     uint32_t data)
   1598 {
   1599 	uint32_t regval;
   1600 	int i;
   1601 
   1602 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1603 
   1604 	CSR_WRITE(sc, reg, regval);
   1605 
   1606 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1607 		delay(5);
   1608 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1609 			break;
   1610 	}
   1611 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1612 		aprint_error("%s: WARNING:"
   1613 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1614 		    device_xname(sc->sc_dev), reg);
   1615 	}
   1616 }
   1617 
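         /*
          * Store a DMA address into the low/high halves of a descriptor
          * address field, converted to the little-endian layout that the
          * hardware expects.  For example, v = 0x0000000123456780 yields
          * wa_low = 0x23456780 and wa_high = 0x00000001.
          */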
   1618 static inline void
   1619 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1620 {
   1621 	wa->wa_low = htole32(v & 0xffffffffU);
   1622 	if (sizeof(bus_addr_t) == 8)
   1623 		wa->wa_high = htole32((uint64_t) v >> 32);
   1624 	else
   1625 		wa->wa_high = 0;
   1626 }
   1627 
   1628 /*
   1629  * Descriptor sync/init functions.
   1630  */
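         /*
          * Note that wm_cdtxsync() copes with ring wrap: on a 256-entry
          * ring, for example, wm_cdtxsync(txq, 250, 10, ops) syncs
          * descriptors 250-255 first and then descriptors 0-3.
          */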
   1631 static inline void
   1632 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1633 {
   1634 	struct wm_softc *sc = txq->txq_sc;
   1635 
   1636 	/* If it will wrap around, sync to the end of the ring. */
   1637 	if ((start + num) > WM_NTXDESC(txq)) {
   1638 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1639 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1640 		    (WM_NTXDESC(txq) - start), ops);
   1641 		num -= (WM_NTXDESC(txq) - start);
   1642 		start = 0;
   1643 	}
   1644 
   1645 	/* Now sync whatever is left. */
   1646 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1647 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1648 }
   1649 
   1650 static inline void
   1651 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1652 {
   1653 	struct wm_softc *sc = rxq->rxq_sc;
   1654 
   1655 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1656 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1657 }
   1658 
   1659 static inline void
   1660 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1661 {
   1662 	struct wm_softc *sc = rxq->rxq_sc;
   1663 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1664 	struct mbuf *m = rxs->rxs_mbuf;
   1665 
   1666 	/*
    1667 	 * Note: We scoot the packet forward 2 bytes in the buffer so
    1668 	 * that the payload after the 14-byte Ethernet header is aligned
    1669 	 * to a 4-byte boundary.
    1670 	 *
   1671 	 * XXX BRAINDAMAGE ALERT!
   1672 	 * The stupid chip uses the same size for every buffer, which
   1673 	 * is set in the Receive Control register.  We are using the 2K
   1674 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1675 	 * reason, we can't "scoot" packets longer than the standard
   1676 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1677 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1678 	 * the upper layer copy the headers.
   1679 	 */
   1680 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1681 
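         	/*
         	 * Three receive descriptor layouts are in use: the 82574
         	 * extended format, the "new queue" format (82575 and newer)
         	 * and the original ("wiseman") format.  All are initialized
         	 * with the same DMA address.
         	 */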
   1682 	if (sc->sc_type == WM_T_82574) {
   1683 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1684 		rxd->erx_data.erxd_addr =
   1685 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1686 		rxd->erx_data.erxd_dd = 0;
   1687 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1688 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1689 
   1690 		rxd->nqrx_data.nrxd_paddr =
   1691 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
    1692 		/* Currently, header splitting is not supported. */
   1693 		rxd->nqrx_data.nrxd_haddr = 0;
   1694 	} else {
   1695 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1696 
   1697 		wm_set_dma_addr(&rxd->wrx_addr,
   1698 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1699 		rxd->wrx_len = 0;
   1700 		rxd->wrx_cksum = 0;
   1701 		rxd->wrx_status = 0;
   1702 		rxd->wrx_errors = 0;
   1703 		rxd->wrx_special = 0;
   1704 	}
   1705 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1706 
   1707 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1708 }
   1709 
   1710 /*
   1711  * Device driver interface functions and commonly used functions.
   1712  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1713  */
   1714 
    1715 /* Look up the device in the supported device table */
   1716 static const struct wm_product *
   1717 wm_lookup(const struct pci_attach_args *pa)
   1718 {
   1719 	const struct wm_product *wmp;
   1720 
   1721 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1722 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1723 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1724 			return wmp;
   1725 	}
   1726 	return NULL;
   1727 }
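
         /*
          * wm_lookup() serves both wm_match(), which only cares whether an
          * entry exists, and wm_attach(), which uses the matched entry's
          * name, chip type and media flags.
          */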
   1728 
   1729 /* The match function (ca_match) */
   1730 static int
   1731 wm_match(device_t parent, cfdata_t cf, void *aux)
   1732 {
   1733 	struct pci_attach_args *pa = aux;
   1734 
   1735 	if (wm_lookup(pa) != NULL)
   1736 		return 1;
   1737 
   1738 	return 0;
   1739 }
   1740 
   1741 /* The attach function (ca_attach) */
   1742 static void
   1743 wm_attach(device_t parent, device_t self, void *aux)
   1744 {
   1745 	struct wm_softc *sc = device_private(self);
   1746 	struct pci_attach_args *pa = aux;
   1747 	prop_dictionary_t dict;
   1748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1749 	pci_chipset_tag_t pc = pa->pa_pc;
   1750 	int counts[PCI_INTR_TYPE_SIZE];
   1751 	pci_intr_type_t max_type;
   1752 	const char *eetype, *xname;
   1753 	bus_space_tag_t memt;
   1754 	bus_space_handle_t memh;
   1755 	bus_size_t memsize;
   1756 	int memh_valid;
   1757 	int i, error;
   1758 	const struct wm_product *wmp;
   1759 	prop_data_t ea;
   1760 	prop_number_t pn;
   1761 	uint8_t enaddr[ETHER_ADDR_LEN];
   1762 	char buf[256];
   1763 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1764 	pcireg_t preg, memtype;
   1765 	uint16_t eeprom_data, apme_mask;
   1766 	bool force_clear_smbi;
   1767 	uint32_t link_mode;
   1768 	uint32_t reg;
   1769 
   1770 	sc->sc_dev = self;
   1771 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1772 	sc->sc_core_stopping = false;
   1773 
   1774 	wmp = wm_lookup(pa);
   1775 #ifdef DIAGNOSTIC
   1776 	if (wmp == NULL) {
   1777 		printf("\n");
   1778 		panic("wm_attach: impossible");
   1779 	}
   1780 #endif
   1781 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1782 
   1783 	sc->sc_pc = pa->pa_pc;
   1784 	sc->sc_pcitag = pa->pa_tag;
   1785 
   1786 	if (pci_dma64_available(pa))
   1787 		sc->sc_dmat = pa->pa_dmat64;
   1788 	else
   1789 		sc->sc_dmat = pa->pa_dmat;
   1790 
   1791 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1792 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1793 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1794 
   1795 	sc->sc_type = wmp->wmp_type;
   1796 
   1797 	/* Set default function pointers */
   1798 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1799 	sc->phy.release = sc->nvm.release = wm_put_null;
   1800 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1801 
   1802 	if (sc->sc_type < WM_T_82543) {
   1803 		if (sc->sc_rev < 2) {
   1804 			aprint_error_dev(sc->sc_dev,
   1805 			    "i82542 must be at least rev. 2\n");
   1806 			return;
   1807 		}
   1808 		if (sc->sc_rev < 3)
   1809 			sc->sc_type = WM_T_82542_2_0;
   1810 	}
   1811 
   1812 	/*
   1813 	 * Disable MSI for Errata:
   1814 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1815 	 *
   1816 	 *  82544: Errata 25
   1817 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1818 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1819 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1820 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1821 	 *
   1822 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1823 	 *
   1824 	 *  82571 & 82572: Errata 63
   1825 	 */
   1826 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1827 	    || (sc->sc_type == WM_T_82572))
   1828 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1829 
   1830 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1831 	    || (sc->sc_type == WM_T_82580)
   1832 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1833 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1834 		sc->sc_flags |= WM_F_NEWQUEUE;
   1835 
   1836 	/* Set device properties (mactype) */
   1837 	dict = device_properties(sc->sc_dev);
   1838 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1839 
   1840 	/*
    1841 	 * Map the device.  All devices support memory-mapped access,
   1842 	 * and it is really required for normal operation.
   1843 	 */
   1844 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1845 	switch (memtype) {
   1846 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1847 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1848 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1849 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1850 		break;
   1851 	default:
   1852 		memh_valid = 0;
   1853 		break;
   1854 	}
   1855 
   1856 	if (memh_valid) {
   1857 		sc->sc_st = memt;
   1858 		sc->sc_sh = memh;
   1859 		sc->sc_ss = memsize;
   1860 	} else {
   1861 		aprint_error_dev(sc->sc_dev,
   1862 		    "unable to map device registers\n");
   1863 		return;
   1864 	}
   1865 
   1866 	/*
   1867 	 * In addition, i82544 and later support I/O mapped indirect
   1868 	 * register access.  It is not desirable (nor supported in
   1869 	 * this driver) to use it for normal operation, though it is
   1870 	 * required to work around bugs in some chip versions.
   1871 	 */
   1872 	if (sc->sc_type >= WM_T_82544) {
   1873 		/* First we have to find the I/O BAR. */
   1874 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1875 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1876 			if (memtype == PCI_MAPREG_TYPE_IO)
   1877 				break;
   1878 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1879 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1880 				i += 4;	/* skip high bits, too */
   1881 		}
   1882 		if (i < PCI_MAPREG_END) {
   1883 			/*
    1884 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1885 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1886 			 * That's no problem, because the newer chips don't
    1887 			 * have this bug.
   1888 			 *
   1889 			 * The i8254x doesn't apparently respond when the
   1890 			 * I/O BAR is 0, which looks somewhat like it's not
   1891 			 * been configured.
   1892 			 */
   1893 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1894 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1895 				aprint_error_dev(sc->sc_dev,
   1896 				    "WARNING: I/O BAR at zero.\n");
   1897 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1898 					0, &sc->sc_iot, &sc->sc_ioh,
   1899 					NULL, &sc->sc_ios) == 0) {
   1900 				sc->sc_flags |= WM_F_IOH_VALID;
   1901 			} else
   1902 				aprint_error_dev(sc->sc_dev,
   1903 				    "WARNING: unable to map I/O space\n");
   1904 		}
   1905 
   1906 	}
   1907 
   1908 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1909 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1910 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1911 	if (sc->sc_type < WM_T_82542_2_1)
   1912 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1913 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1914 
   1915 	/* power up chip */
   1916 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1917 	    && error != EOPNOTSUPP) {
   1918 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1919 		return;
   1920 	}
   1921 
   1922 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1923 	/*
    1924 	 * If we can use only one queue, don't use MSI-X, to save
    1925 	 * interrupt resources.
   1926 	 */
   1927 	if (sc->sc_nqueues > 1) {
   1928 		max_type = PCI_INTR_TYPE_MSIX;
   1929 		/*
    1930 		 * The 82583 has an MSI-X capability in its PCI configuration
    1931 		 * space but doesn't actually support it; at least, its
    1932 		 * documentation says nothing about MSI-X.
   1933 		 */
   1934 		counts[PCI_INTR_TYPE_MSIX]
   1935 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1936 	} else {
   1937 		max_type = PCI_INTR_TYPE_MSI;
   1938 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1939 	}
   1940 
   1941 	/* Allocation settings */
   1942 	counts[PCI_INTR_TYPE_MSI] = 1;
   1943 	counts[PCI_INTR_TYPE_INTX] = 1;
   1944 	/* overridden by disable flags */
   1945 	if (wm_disable_msi != 0) {
   1946 		counts[PCI_INTR_TYPE_MSI] = 0;
   1947 		if (wm_disable_msix != 0) {
   1948 			max_type = PCI_INTR_TYPE_INTX;
   1949 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1950 		}
   1951 	} else if (wm_disable_msix != 0) {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
   1956 alloc_retry:
   1957 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1958 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1959 		return;
   1960 	}
   1961 
   1962 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1963 		error = wm_setup_msix(sc);
   1964 		if (error) {
   1965 			pci_intr_release(pc, sc->sc_intrs,
   1966 			    counts[PCI_INTR_TYPE_MSIX]);
   1967 
   1968 			/* Setup for MSI: Disable MSI-X */
   1969 			max_type = PCI_INTR_TYPE_MSI;
   1970 			counts[PCI_INTR_TYPE_MSI] = 1;
   1971 			counts[PCI_INTR_TYPE_INTX] = 1;
   1972 			goto alloc_retry;
   1973 		}
   1974 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1975 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1976 		error = wm_setup_legacy(sc);
   1977 		if (error) {
   1978 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1979 			    counts[PCI_INTR_TYPE_MSI]);
   1980 
   1981 			/* The next try is for INTx: Disable MSI */
   1982 			max_type = PCI_INTR_TYPE_INTX;
   1983 			counts[PCI_INTR_TYPE_INTX] = 1;
   1984 			goto alloc_retry;
   1985 		}
   1986 	} else {
   1987 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1988 		error = wm_setup_legacy(sc);
   1989 		if (error) {
   1990 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1991 			    counts[PCI_INTR_TYPE_INTX]);
   1992 			return;
   1993 		}
   1994 	}
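
         	/*
         	 * At this point an interrupt type has been established; the
         	 * retries above fall back from MSI-X to MSI and finally to
         	 * INTx.
         	 */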
   1995 
   1996 	/*
   1997 	 * Check the function ID (unit number of the chip).
   1998 	 */
   1999 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2000 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2001 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2002 	    || (sc->sc_type == WM_T_82580)
   2003 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2004 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2005 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2006 	else
   2007 		sc->sc_funcid = 0;
   2008 
   2009 	/*
   2010 	 * Determine a few things about the bus we're connected to.
   2011 	 */
   2012 	if (sc->sc_type < WM_T_82543) {
   2013 		/* We don't really know the bus characteristics here. */
   2014 		sc->sc_bus_speed = 33;
   2015 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2016 		/*
    2017 		 * CSA (Communication Streaming Architecture) is about as
    2018 		 * fast as a 32-bit 66MHz PCI bus.
   2019 		 */
   2020 		sc->sc_flags |= WM_F_CSA;
   2021 		sc->sc_bus_speed = 66;
   2022 		aprint_verbose_dev(sc->sc_dev,
   2023 		    "Communication Streaming Architecture\n");
   2024 		if (sc->sc_type == WM_T_82547) {
   2025 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2026 			callout_setfunc(&sc->sc_txfifo_ch,
   2027 			    wm_82547_txfifo_stall, sc);
   2028 			aprint_verbose_dev(sc->sc_dev,
   2029 			    "using 82547 Tx FIFO stall work-around\n");
   2030 		}
   2031 	} else if (sc->sc_type >= WM_T_82571) {
   2032 		sc->sc_flags |= WM_F_PCIE;
   2033 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2034 		    && (sc->sc_type != WM_T_ICH10)
   2035 		    && (sc->sc_type != WM_T_PCH)
   2036 		    && (sc->sc_type != WM_T_PCH2)
   2037 		    && (sc->sc_type != WM_T_PCH_LPT)
   2038 		    && (sc->sc_type != WM_T_PCH_SPT)
   2039 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2040 			/* ICH* and PCH* have no PCIe capability registers */
   2041 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2042 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2043 				NULL) == 0)
   2044 				aprint_error_dev(sc->sc_dev,
   2045 				    "unable to find PCIe capability\n");
   2046 		}
   2047 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2048 	} else {
   2049 		reg = CSR_READ(sc, WMREG_STATUS);
   2050 		if (reg & STATUS_BUS64)
   2051 			sc->sc_flags |= WM_F_BUS64;
   2052 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2053 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2054 
   2055 			sc->sc_flags |= WM_F_PCIX;
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIX capability\n");
   2060 			else if (sc->sc_type != WM_T_82545_3 &&
   2061 				 sc->sc_type != WM_T_82546_3) {
   2062 				/*
   2063 				 * Work around a problem caused by the BIOS
   2064 				 * setting the max memory read byte count
   2065 				 * incorrectly.
   2066 				 */
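         				/*
         				 * Both fields encode 512 << n bytes:
         				 * e.g. bytecnt = 2 means a 2048-byte
         				 * MMRBC, and maxb = 1 clamps it down
         				 * to 1024 bytes.
         				 */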
   2067 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2068 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2069 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2070 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2071 
   2072 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2073 				    PCIX_CMD_BYTECNT_SHIFT;
   2074 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2075 				    PCIX_STATUS_MAXB_SHIFT;
   2076 				if (bytecnt > maxb) {
   2077 					aprint_verbose_dev(sc->sc_dev,
   2078 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2079 					    512 << bytecnt, 512 << maxb);
   2080 					pcix_cmd = (pcix_cmd &
   2081 					    ~PCIX_CMD_BYTECNT_MASK) |
   2082 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2083 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2084 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2085 					    pcix_cmd);
   2086 				}
   2087 			}
   2088 		}
   2089 		/*
   2090 		 * The quad port adapter is special; it has a PCIX-PCIX
   2091 		 * bridge on the board, and can run the secondary bus at
   2092 		 * a higher speed.
   2093 		 */
   2094 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2095 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2096 								      : 66;
   2097 		} else if (sc->sc_flags & WM_F_PCIX) {
   2098 			switch (reg & STATUS_PCIXSPD_MASK) {
   2099 			case STATUS_PCIXSPD_50_66:
   2100 				sc->sc_bus_speed = 66;
   2101 				break;
   2102 			case STATUS_PCIXSPD_66_100:
   2103 				sc->sc_bus_speed = 100;
   2104 				break;
   2105 			case STATUS_PCIXSPD_100_133:
   2106 				sc->sc_bus_speed = 133;
   2107 				break;
   2108 			default:
   2109 				aprint_error_dev(sc->sc_dev,
   2110 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2111 				    reg & STATUS_PCIXSPD_MASK);
   2112 				sc->sc_bus_speed = 66;
   2113 				break;
   2114 			}
   2115 		} else
   2116 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2117 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2118 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2119 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2120 	}
   2121 
   2122 	/* clear interesting stat counters */
   2123 	CSR_READ(sc, WMREG_COLC);
   2124 	CSR_READ(sc, WMREG_RXERRC);
   2125 
   2126 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2127 	    || (sc->sc_type >= WM_T_ICH8))
   2128 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2129 	if (sc->sc_type >= WM_T_ICH8)
   2130 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2131 
    2132 	/* Set up NVM access and PHY/NVM locking functions */
   2133 	switch (sc->sc_type) {
   2134 	case WM_T_82542_2_0:
   2135 	case WM_T_82542_2_1:
   2136 	case WM_T_82543:
   2137 	case WM_T_82544:
   2138 		/* Microwire */
   2139 		sc->nvm.read = wm_nvm_read_uwire;
   2140 		sc->sc_nvm_wordsize = 64;
   2141 		sc->sc_nvm_addrbits = 6;
   2142 		break;
   2143 	case WM_T_82540:
   2144 	case WM_T_82545:
   2145 	case WM_T_82545_3:
   2146 	case WM_T_82546:
   2147 	case WM_T_82546_3:
   2148 		/* Microwire */
   2149 		sc->nvm.read = wm_nvm_read_uwire;
   2150 		reg = CSR_READ(sc, WMREG_EECD);
   2151 		if (reg & EECD_EE_SIZE) {
   2152 			sc->sc_nvm_wordsize = 256;
   2153 			sc->sc_nvm_addrbits = 8;
   2154 		} else {
   2155 			sc->sc_nvm_wordsize = 64;
   2156 			sc->sc_nvm_addrbits = 6;
   2157 		}
   2158 		sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		sc->nvm.acquire = wm_get_eecd;
   2160 		sc->nvm.release = wm_put_eecd;
   2161 		break;
   2162 	case WM_T_82541:
   2163 	case WM_T_82541_2:
   2164 	case WM_T_82547:
   2165 	case WM_T_82547_2:
   2166 		reg = CSR_READ(sc, WMREG_EECD);
   2167 		/*
    2168 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2169 		 * 8254[17], so set the flags and functions before calling it.
   2170 		 */
   2171 		sc->sc_flags |= WM_F_LOCK_EECD;
   2172 		sc->nvm.acquire = wm_get_eecd;
   2173 		sc->nvm.release = wm_put_eecd;
   2174 		if (reg & EECD_EE_TYPE) {
   2175 			/* SPI */
   2176 			sc->nvm.read = wm_nvm_read_spi;
   2177 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 		} else {
   2180 			/* Microwire */
   2181 			sc->nvm.read = wm_nvm_read_uwire;
   2182 			if ((reg & EECD_EE_ABITS) != 0) {
   2183 				sc->sc_nvm_wordsize = 256;
   2184 				sc->sc_nvm_addrbits = 8;
   2185 			} else {
   2186 				sc->sc_nvm_wordsize = 64;
   2187 				sc->sc_nvm_addrbits = 6;
   2188 			}
   2189 		}
   2190 		break;
   2191 	case WM_T_82571:
   2192 	case WM_T_82572:
   2193 		/* SPI */
   2194 		sc->nvm.read = wm_nvm_read_eerd;
    2195 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2196 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2197 		wm_nvm_set_addrbits_size_eecd(sc);
   2198 		sc->phy.acquire = wm_get_swsm_semaphore;
   2199 		sc->phy.release = wm_put_swsm_semaphore;
   2200 		sc->nvm.acquire = wm_get_nvm_82571;
   2201 		sc->nvm.release = wm_put_nvm_82571;
   2202 		break;
   2203 	case WM_T_82573:
   2204 	case WM_T_82574:
   2205 	case WM_T_82583:
   2206 		sc->nvm.read = wm_nvm_read_eerd;
    2207 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2208 		if (sc->sc_type == WM_T_82573) {
   2209 			sc->phy.acquire = wm_get_swsm_semaphore;
   2210 			sc->phy.release = wm_put_swsm_semaphore;
   2211 			sc->nvm.acquire = wm_get_nvm_82571;
   2212 			sc->nvm.release = wm_put_nvm_82571;
   2213 		} else {
   2214 			/* Both PHY and NVM use the same semaphore. */
   2215 			sc->phy.acquire = sc->nvm.acquire
   2216 			    = wm_get_swfwhw_semaphore;
   2217 			sc->phy.release = sc->nvm.release
   2218 			    = wm_put_swfwhw_semaphore;
   2219 		}
   2220 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2221 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2222 			sc->sc_nvm_wordsize = 2048;
   2223 		} else {
   2224 			/* SPI */
   2225 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2226 			wm_nvm_set_addrbits_size_eecd(sc);
   2227 		}
   2228 		break;
   2229 	case WM_T_82575:
   2230 	case WM_T_82576:
   2231 	case WM_T_82580:
   2232 	case WM_T_I350:
   2233 	case WM_T_I354:
   2234 	case WM_T_80003:
   2235 		/* SPI */
   2236 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2237 		wm_nvm_set_addrbits_size_eecd(sc);
   2238 		if ((sc->sc_type == WM_T_80003)
   2239 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2240 			sc->nvm.read = wm_nvm_read_eerd;
   2241 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2242 		} else {
   2243 			sc->nvm.read = wm_nvm_read_spi;
   2244 			sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		}
   2246 		sc->phy.acquire = wm_get_phy_82575;
   2247 		sc->phy.release = wm_put_phy_82575;
   2248 		sc->nvm.acquire = wm_get_nvm_80003;
   2249 		sc->nvm.release = wm_put_nvm_80003;
   2250 		break;
   2251 	case WM_T_ICH8:
   2252 	case WM_T_ICH9:
   2253 	case WM_T_ICH10:
   2254 	case WM_T_PCH:
   2255 	case WM_T_PCH2:
   2256 	case WM_T_PCH_LPT:
   2257 		sc->nvm.read = wm_nvm_read_ich8;
   2258 		/* FLASH */
   2259 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2260 		sc->sc_nvm_wordsize = 2048;
   2261 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2262 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2263 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2264 			aprint_error_dev(sc->sc_dev,
   2265 			    "can't map FLASH registers\n");
   2266 			goto out;
   2267 		}
   2268 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2269 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2270 		    ICH_FLASH_SECTOR_SIZE;
   2271 		sc->sc_ich8_flash_bank_size =
   2272 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2273 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2274 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2275 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
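         		/*
         		 * The arithmetic above turns the GFPREG base/limit pair
         		 * (in flash-sector units) into a per-bank size in 16-bit
         		 * words; the region holds two NVM banks.
         		 */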
   2276 		sc->sc_flashreg_offset = 0;
   2277 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2278 		sc->phy.release = wm_put_swflag_ich8lan;
   2279 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2280 		sc->nvm.release = wm_put_nvm_ich8lan;
   2281 		break;
   2282 	case WM_T_PCH_SPT:
   2283 	case WM_T_PCH_CNP:
   2284 		sc->nvm.read = wm_nvm_read_spt;
   2285 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2286 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2287 		sc->sc_flasht = sc->sc_st;
   2288 		sc->sc_flashh = sc->sc_sh;
   2289 		sc->sc_ich8_flash_base = 0;
   2290 		sc->sc_nvm_wordsize =
   2291 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2292 		    * NVM_SIZE_MULTIPLIER;
    2293 		/* It is the size in bytes; we want words */
   2294 		sc->sc_nvm_wordsize /= 2;
   2295 		/* assume 2 banks */
   2296 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2297 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2298 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2299 		sc->phy.release = wm_put_swflag_ich8lan;
   2300 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2301 		sc->nvm.release = wm_put_nvm_ich8lan;
   2302 		break;
   2303 	case WM_T_I210:
   2304 	case WM_T_I211:
    2305 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2306 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2307 		if (wm_nvm_flash_presence_i210(sc)) {
   2308 			sc->nvm.read = wm_nvm_read_eerd;
   2309 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2310 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2311 			wm_nvm_set_addrbits_size_eecd(sc);
   2312 		} else {
   2313 			sc->nvm.read = wm_nvm_read_invm;
   2314 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2315 			sc->sc_nvm_wordsize = INVM_SIZE;
   2316 		}
   2317 		sc->phy.acquire = wm_get_phy_82575;
   2318 		sc->phy.release = wm_put_phy_82575;
   2319 		sc->nvm.acquire = wm_get_nvm_80003;
   2320 		sc->nvm.release = wm_put_nvm_80003;
   2321 		break;
   2322 	default:
   2323 		break;
   2324 	}
   2325 
   2326 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2327 	switch (sc->sc_type) {
   2328 	case WM_T_82571:
   2329 	case WM_T_82572:
   2330 		reg = CSR_READ(sc, WMREG_SWSM2);
   2331 		if ((reg & SWSM2_LOCK) == 0) {
   2332 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2333 			force_clear_smbi = true;
   2334 		} else
   2335 			force_clear_smbi = false;
   2336 		break;
   2337 	case WM_T_82573:
   2338 	case WM_T_82574:
   2339 	case WM_T_82583:
   2340 		force_clear_smbi = true;
   2341 		break;
   2342 	default:
   2343 		force_clear_smbi = false;
   2344 		break;
   2345 	}
   2346 	if (force_clear_smbi) {
   2347 		reg = CSR_READ(sc, WMREG_SWSM);
   2348 		if ((reg & SWSM_SMBI) != 0)
   2349 			aprint_error_dev(sc->sc_dev,
   2350 			    "Please update the Bootagent\n");
   2351 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2352 	}
   2353 
   2354 	/*
    2355 	 * Defer printing the EEPROM type until after verifying the checksum.
    2356 	 * This allows the EEPROM type to be printed correctly in the case
   2357 	 * that no EEPROM is attached.
   2358 	 */
   2359 	/*
   2360 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2361 	 * this for later, so we can fail future reads from the EEPROM.
   2362 	 */
   2363 	if (wm_nvm_validate_checksum(sc)) {
   2364 		/*
    2365 		 * Check again, because some PCIe parts fail the first
    2366 		 * check while the link is in a sleep state.
   2367 		 */
   2368 		if (wm_nvm_validate_checksum(sc))
   2369 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2370 	}
   2371 
   2372 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2373 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2374 	else {
   2375 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2376 		    sc->sc_nvm_wordsize);
   2377 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2378 			aprint_verbose("iNVM");
   2379 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2380 			aprint_verbose("FLASH(HW)");
   2381 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2382 			aprint_verbose("FLASH");
   2383 		else {
   2384 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2385 				eetype = "SPI";
   2386 			else
   2387 				eetype = "MicroWire";
   2388 			aprint_verbose("(%d address bits) %s EEPROM",
   2389 			    sc->sc_nvm_addrbits, eetype);
   2390 		}
   2391 	}
   2392 	wm_nvm_version(sc);
   2393 	aprint_verbose("\n");
   2394 
   2395 	/*
    2396 	 * XXX The first call to wm_gmii_setup_phytype.  The result might be
   2397 	 * incorrect.
   2398 	 */
   2399 	wm_gmii_setup_phytype(sc, 0, 0);
   2400 
   2401 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2402 	switch (sc->sc_type) {
   2403 	case WM_T_ICH8:
   2404 	case WM_T_ICH9:
   2405 	case WM_T_ICH10:
   2406 	case WM_T_PCH:
   2407 	case WM_T_PCH2:
   2408 	case WM_T_PCH_LPT:
   2409 	case WM_T_PCH_SPT:
   2410 	case WM_T_PCH_CNP:
   2411 		apme_mask = WUC_APME;
   2412 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2413 		if ((eeprom_data & apme_mask) != 0)
   2414 			sc->sc_flags |= WM_F_WOL;
   2415 		break;
   2416 	default:
   2417 		break;
   2418 	}
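
         	/*
         	 * For the ICH/PCH chips above, the APME bit comes from the
         	 * WUC register rather than from the NVM; the post-reset WOL
         	 * check below therefore skips them ("Already checked").
         	 */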
   2419 
   2420 	/* Reset the chip to a known state. */
   2421 	wm_reset(sc);
   2422 
   2423 	/*
   2424 	 * Check for I21[01] PLL workaround.
   2425 	 *
   2426 	 * Three cases:
   2427 	 * a) Chip is I211.
   2428 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2429 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2430 	 */
   2431 	if (sc->sc_type == WM_T_I211)
   2432 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 	if (sc->sc_type == WM_T_I210) {
   2434 		if (!wm_nvm_flash_presence_i210(sc))
   2435 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2436 		else if ((sc->sc_nvm_ver_major < 3)
   2437 		    || ((sc->sc_nvm_ver_major == 3)
   2438 			&& (sc->sc_nvm_ver_minor < 25))) {
   2439 			aprint_verbose_dev(sc->sc_dev,
   2440 			    "ROM image version %d.%d is older than 3.25\n",
   2441 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2442 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2443 		}
   2444 	}
   2445 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2446 		wm_pll_workaround_i210(sc);
   2447 
   2448 	wm_get_wakeup(sc);
   2449 
   2450 	/* Non-AMT based hardware can now take control from firmware */
   2451 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2452 		wm_get_hw_control(sc);
   2453 
   2454 	/*
   2455 	 * Read the Ethernet address from the EEPROM, if not first found
   2456 	 * in device properties.
   2457 	 */
   2458 	ea = prop_dictionary_get(dict, "mac-address");
   2459 	if (ea != NULL) {
   2460 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2461 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2462 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2463 	} else {
   2464 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2465 			aprint_error_dev(sc->sc_dev,
   2466 			    "unable to read Ethernet address\n");
   2467 			goto out;
   2468 		}
   2469 	}
   2470 
   2471 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2472 	    ether_sprintf(enaddr));
   2473 
   2474 	/*
   2475 	 * Read the config info from the EEPROM, and set up various
   2476 	 * bits in the control registers based on their contents.
   2477 	 */
   2478 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2479 	if (pn != NULL) {
   2480 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2481 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2482 	} else {
   2483 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2484 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2485 			goto out;
   2486 		}
   2487 	}
   2488 
   2489 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2490 	if (pn != NULL) {
   2491 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2492 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2493 	} else {
   2494 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2495 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2496 			goto out;
   2497 		}
   2498 	}
   2499 
   2500 	/* check for WM_F_WOL */
   2501 	switch (sc->sc_type) {
   2502 	case WM_T_82542_2_0:
   2503 	case WM_T_82542_2_1:
   2504 	case WM_T_82543:
   2505 		/* dummy? */
   2506 		eeprom_data = 0;
   2507 		apme_mask = NVM_CFG3_APME;
   2508 		break;
   2509 	case WM_T_82544:
   2510 		apme_mask = NVM_CFG2_82544_APM_EN;
   2511 		eeprom_data = cfg2;
   2512 		break;
   2513 	case WM_T_82546:
   2514 	case WM_T_82546_3:
   2515 	case WM_T_82571:
   2516 	case WM_T_82572:
   2517 	case WM_T_82573:
   2518 	case WM_T_82574:
   2519 	case WM_T_82583:
   2520 	case WM_T_80003:
   2521 	case WM_T_82575:
   2522 	case WM_T_82576:
   2523 		apme_mask = NVM_CFG3_APME;
   2524 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2525 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2526 		break;
   2527 	case WM_T_82580:
   2528 	case WM_T_I350:
   2529 	case WM_T_I354:
   2530 	case WM_T_I210:
   2531 	case WM_T_I211:
   2532 		apme_mask = NVM_CFG3_APME;
   2533 		wm_nvm_read(sc,
   2534 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2535 		    1, &eeprom_data);
   2536 		break;
   2537 	case WM_T_ICH8:
   2538 	case WM_T_ICH9:
   2539 	case WM_T_ICH10:
   2540 	case WM_T_PCH:
   2541 	case WM_T_PCH2:
   2542 	case WM_T_PCH_LPT:
   2543 	case WM_T_PCH_SPT:
   2544 	case WM_T_PCH_CNP:
    2545 		/* Already checked before wm_reset() */
   2546 		apme_mask = eeprom_data = 0;
   2547 		break;
   2548 	default: /* XXX 82540 */
   2549 		apme_mask = NVM_CFG3_APME;
   2550 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2551 		break;
   2552 	}
    2553 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2554 	if ((eeprom_data & apme_mask) != 0)
   2555 		sc->sc_flags |= WM_F_WOL;
   2556 
   2557 	/*
    2558 	 * We have the EEPROM settings; now apply the special cases
    2559 	 * where the EEPROM may be wrong or the board won't support
    2560 	 * wake-on-LAN on a particular port.
   2561 	 */
   2562 	switch (sc->sc_pcidevid) {
   2563 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2564 		sc->sc_flags &= ~WM_F_WOL;
   2565 		break;
   2566 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2567 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2568 		/* Wake events are only supported on port A of dual-fiber
    2569 		 * adapters, regardless of the EEPROM setting */
   2570 		if (sc->sc_funcid == 1)
   2571 			sc->sc_flags &= ~WM_F_WOL;
   2572 		break;
   2573 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
    2574 		/* On quad-port adapters, disable WoL on all but port A */
   2575 		if (sc->sc_funcid != 0)
   2576 			sc->sc_flags &= ~WM_F_WOL;
   2577 		break;
   2578 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2579 		/* Wake events are only supported on port A of dual-fiber
    2580 		 * adapters, regardless of the EEPROM setting */
   2581 		if (sc->sc_funcid == 1)
   2582 			sc->sc_flags &= ~WM_F_WOL;
   2583 		break;
   2584 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2585 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2586 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
    2587 		/* On quad-port adapters, disable WoL on all but port A */
   2588 		if (sc->sc_funcid != 0)
   2589 			sc->sc_flags &= ~WM_F_WOL;
   2590 		break;
   2591 	}
   2592 
   2593 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2594 		/* Check NVM for autonegotiation */
   2595 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2596 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2597 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2598 		}
   2599 	}
   2600 
   2601 	/*
    2602 	 * XXX Some multiple-port cards need special handling to
    2603 	 * disable a particular port.
   2604 	 */
   2605 
   2606 	if (sc->sc_type >= WM_T_82544) {
   2607 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2608 		if (pn != NULL) {
   2609 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2610 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2611 		} else {
   2612 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2613 				aprint_error_dev(sc->sc_dev,
   2614 				    "unable to read SWDPIN\n");
   2615 				goto out;
   2616 			}
   2617 		}
   2618 	}
   2619 
   2620 	if (cfg1 & NVM_CFG1_ILOS)
   2621 		sc->sc_ctrl |= CTRL_ILOS;
   2622 
   2623 	/*
   2624 	 * XXX
    2625 	 * This code isn't correct because pins 2 and 3 are located
    2626 	 * in different positions on newer chips.  Check all datasheets.
    2627 	 *
    2628 	 * Until this is resolved, apply it only to chips up to the 82580.
   2629 	 */
   2630 	if (sc->sc_type <= WM_T_82580) {
   2631 		if (sc->sc_type >= WM_T_82544) {
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPIO_SHIFT;
   2635 			sc->sc_ctrl |=
   2636 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2637 			    CTRL_SWDPINS_SHIFT;
   2638 		} else {
   2639 			sc->sc_ctrl |=
   2640 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2641 			    CTRL_SWDPIO_SHIFT;
   2642 		}
   2643 	}
   2644 
   2645 	/* XXX For other than 82580? */
   2646 	if (sc->sc_type == WM_T_82580) {
   2647 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2648 		if (nvmword & __BIT(13))
   2649 			sc->sc_ctrl |= CTRL_ILOS;
   2650 	}
   2651 
   2652 #if 0
   2653 	if (sc->sc_type >= WM_T_82544) {
   2654 		if (cfg1 & NVM_CFG1_IPS0)
   2655 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2656 		if (cfg1 & NVM_CFG1_IPS1)
   2657 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPIO_SHIFT;
   2661 		sc->sc_ctrl_ext |=
   2662 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2663 		    CTRL_EXT_SWDPINS_SHIFT;
   2664 	} else {
   2665 		sc->sc_ctrl_ext |=
   2666 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2667 		    CTRL_EXT_SWDPIO_SHIFT;
   2668 	}
   2669 #endif
   2670 
   2671 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2672 #if 0
   2673 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2674 #endif
   2675 
   2676 	if (sc->sc_type == WM_T_PCH) {
   2677 		uint16_t val;
   2678 
   2679 		/* Save the NVM K1 bit setting */
   2680 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2681 
   2682 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2683 			sc->sc_nvm_k1_enabled = 1;
   2684 		else
   2685 			sc->sc_nvm_k1_enabled = 0;
   2686 	}
   2687 
    2688 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2689 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2690 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2691 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2692 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2693 	    || sc->sc_type == WM_T_82573
   2694 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2695 		/* Copper only */
   2696 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2697 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2698 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2699 	    || (sc->sc_type == WM_T_I211)) {
   2700 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2701 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2702 		switch (link_mode) {
   2703 		case CTRL_EXT_LINK_MODE_1000KX:
   2704 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2705 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2706 			break;
   2707 		case CTRL_EXT_LINK_MODE_SGMII:
   2708 			if (wm_sgmii_uses_mdio(sc)) {
   2709 				aprint_verbose_dev(sc->sc_dev,
   2710 				    "SGMII(MDIO)\n");
   2711 				sc->sc_flags |= WM_F_SGMII;
   2712 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2713 				break;
   2714 			}
   2715 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2716 			/*FALLTHROUGH*/
   2717 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2718 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2719 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2720 				if (link_mode
   2721 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2723 					sc->sc_flags |= WM_F_SGMII;
   2724 				} else {
   2725 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2726 					aprint_verbose_dev(sc->sc_dev,
   2727 					    "SERDES\n");
   2728 				}
   2729 				break;
   2730 			}
   2731 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2732 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2733 
   2734 			/* Change current link mode setting */
   2735 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2736 			switch (sc->sc_mediatype) {
   2737 			case WM_MEDIATYPE_COPPER:
   2738 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2739 				break;
   2740 			case WM_MEDIATYPE_SERDES:
   2741 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2742 				break;
   2743 			default:
   2744 				break;
   2745 			}
   2746 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2747 			break;
   2748 		case CTRL_EXT_LINK_MODE_GMII:
   2749 		default:
   2750 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2751 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2752 			break;
   2753 		}
   2754 
    2756 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2757 			reg |= CTRL_EXT_I2C_ENA;
    2758 		else
    2759 			reg &= ~CTRL_EXT_I2C_ENA;
   2760 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 	} else if (sc->sc_type < WM_T_82543 ||
   2762 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2763 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2764 			aprint_error_dev(sc->sc_dev,
   2765 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2767 		}
   2768 	} else {
   2769 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2770 			aprint_error_dev(sc->sc_dev,
   2771 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2772 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2773 		}
   2774 	}
   2775 
   2776 	if (sc->sc_type >= WM_T_PCH2)
   2777 		sc->sc_flags |= WM_F_EEE;
   2778 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2779 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2780 		/* XXX: Need special handling for I354. (not yet) */
   2781 		if (sc->sc_type != WM_T_I354)
   2782 			sc->sc_flags |= WM_F_EEE;
   2783 	}
   2784 
   2785 	/* Set device properties (macflags) */
   2786 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2787 
   2788 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2789 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2790 
   2791 	/* Initialize the media structures accordingly. */
   2792 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2793 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2794 	else
   2795 		wm_tbi_mediainit(sc); /* All others */
   2796 
   2797 	ifp = &sc->sc_ethercom.ec_if;
   2798 	xname = device_xname(sc->sc_dev);
   2799 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2800 	ifp->if_softc = sc;
   2801 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2802 #ifdef WM_MPSAFE
   2803 	ifp->if_extflags = IFEF_MPSAFE;
   2804 #endif
   2805 	ifp->if_ioctl = wm_ioctl;
   2806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2807 		ifp->if_start = wm_nq_start;
    2808 		/*
    2809 		 * When the number of CPUs is one and the controller can use
    2810 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2811 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2812 		 * other for link status changes.  In this situation,
    2813 		 * wm_nq_transmit() is disadvantageous because of the
    2814 		 * wm_select_txqueue() and pcq(9) overhead.
    2815 		 */
   2816 		if (wm_is_using_multiqueue(sc))
   2817 			ifp->if_transmit = wm_nq_transmit;
   2818 	} else {
   2819 		ifp->if_start = wm_start;
   2820 		/*
    2821 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2822 		 */
   2823 		if (wm_is_using_multiqueue(sc))
   2824 			ifp->if_transmit = wm_transmit;
   2825 	}
    2826 	/* wm(4) does not use ifp->if_watchdog; wm_tick is the watchdog. */
   2827 	ifp->if_init = wm_init;
   2828 	ifp->if_stop = wm_stop;
   2829 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2830 	IFQ_SET_READY(&ifp->if_snd);
   2831 
   2832 	/* Check for jumbo frame */
   2833 	switch (sc->sc_type) {
   2834 	case WM_T_82573:
   2835 		/* XXX limited to 9234 if ASPM is disabled */
   2836 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2837 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2838 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2839 		break;
   2840 	case WM_T_82571:
   2841 	case WM_T_82572:
   2842 	case WM_T_82574:
   2843 	case WM_T_82583:
   2844 	case WM_T_82575:
   2845 	case WM_T_82576:
   2846 	case WM_T_82580:
   2847 	case WM_T_I350:
   2848 	case WM_T_I354:
   2849 	case WM_T_I210:
   2850 	case WM_T_I211:
   2851 	case WM_T_80003:
   2852 	case WM_T_ICH9:
   2853 	case WM_T_ICH10:
   2854 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2855 	case WM_T_PCH_LPT:
   2856 	case WM_T_PCH_SPT:
   2857 	case WM_T_PCH_CNP:
   2858 		/* XXX limited to 9234 */
   2859 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2860 		break;
   2861 	case WM_T_PCH:
   2862 		/* XXX limited to 4096 */
   2863 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2864 		break;
   2865 	case WM_T_82542_2_0:
   2866 	case WM_T_82542_2_1:
   2867 	case WM_T_ICH8:
   2868 		/* No support for jumbo frame */
   2869 		break;
   2870 	default:
   2871 		/* ETHER_MAX_LEN_JUMBO */
   2872 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2873 		break;
   2874 	}
   2875 
    2876 	/* If we're an i82543 or greater, we can support VLANs. */
   2877 	if (sc->sc_type >= WM_T_82543)
   2878 		sc->sc_ethercom.ec_capabilities |=
   2879 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2880 
   2881 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2882 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2883 
   2884 	/*
    2885 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2886 	 * on i82543 and later.
   2887 	 */
   2888 	if (sc->sc_type >= WM_T_82543) {
   2889 		ifp->if_capabilities |=
   2890 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2891 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2892 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2893 		    IFCAP_CSUM_TCPv6_Tx |
   2894 		    IFCAP_CSUM_UDPv6_Tx;
   2895 	}
   2896 
   2897 	/*
    2898 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2899 	 *
   2900 	 *	82541GI (8086:1076) ... no
   2901 	 *	82572EI (8086:10b9) ... yes
   2902 	 */
   2903 	if (sc->sc_type >= WM_T_82571) {
   2904 		ifp->if_capabilities |=
   2905 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2906 	}
   2907 
   2908 	/*
    2909 	 * If we're an i82544 or greater (except i82547), we can do
   2910 	 * TCP segmentation offload.
   2911 	 */
   2912 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2913 		ifp->if_capabilities |= IFCAP_TSOv4;
   2914 	}
   2915 
   2916 	if (sc->sc_type >= WM_T_82571) {
   2917 		ifp->if_capabilities |= IFCAP_TSOv6;
   2918 	}
   2919 
   2920 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2921 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2922 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2923 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2924 
   2925 #ifdef WM_MPSAFE
   2926 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2927 #else
   2928 	sc->sc_core_lock = NULL;
   2929 #endif
   2930 
   2931 	/* Attach the interface. */
   2932 	error = if_initialize(ifp);
   2933 	if (error != 0) {
   2934 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2935 		    error);
   2936 		return; /* Error */
   2937 	}
   2938 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2939 	ether_ifattach(ifp, enaddr);
   2940 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2941 	if_register(ifp);
   2942 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2943 	    RND_FLAG_DEFAULT);
   2944 
   2945 #ifdef WM_EVENT_COUNTERS
   2946 	/* Attach event counters. */
   2947 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2948 	    NULL, xname, "linkintr");
   2949 
   2950 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2951 	    NULL, xname, "tx_xoff");
   2952 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2953 	    NULL, xname, "tx_xon");
   2954 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2955 	    NULL, xname, "rx_xoff");
   2956 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2957 	    NULL, xname, "rx_xon");
   2958 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2959 	    NULL, xname, "rx_macctl");
   2960 #endif /* WM_EVENT_COUNTERS */
   2961 
   2962 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2963 		pmf_class_network_register(self, ifp);
   2964 	else
   2965 		aprint_error_dev(self, "couldn't establish power handler\n");
   2966 
   2967 	sc->sc_flags |= WM_F_ATTACHED;
   2968 out:
   2969 	return;
   2970 }
   2971 
   2972 /* The detach function (ca_detach) */
   2973 static int
   2974 wm_detach(device_t self, int flags __unused)
   2975 {
   2976 	struct wm_softc *sc = device_private(self);
   2977 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2978 	int i;
   2979 
   2980 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2981 		return 0;
   2982 
   2983 	/* Stop the interface. Callouts are stopped in it. */
   2984 	wm_stop(ifp, 1);
   2985 
   2986 	pmf_device_deregister(self);
   2987 
   2988 #ifdef WM_EVENT_COUNTERS
   2989 	evcnt_detach(&sc->sc_ev_linkintr);
   2990 
   2991 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2992 	evcnt_detach(&sc->sc_ev_tx_xon);
   2993 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2994 	evcnt_detach(&sc->sc_ev_rx_xon);
   2995 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2996 #endif /* WM_EVENT_COUNTERS */
   2997 
   2998 	rnd_detach_source(&sc->rnd_source);
   2999 
   3000 	/* Tell the firmware about the release */
   3001 	WM_CORE_LOCK(sc);
   3002 	wm_release_manageability(sc);
   3003 	wm_release_hw_control(sc);
   3004 	wm_enable_wakeup(sc);
   3005 	WM_CORE_UNLOCK(sc);
   3006 
   3007 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3008 
   3009 	/* Delete all remaining media. */
   3010 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3011 
   3012 	ether_ifdetach(ifp);
   3013 	if_detach(ifp);
   3014 	if_percpuq_destroy(sc->sc_ipq);
   3015 
   3016 	/* Unload RX dmamaps and free mbufs */
   3017 	for (i = 0; i < sc->sc_nqueues; i++) {
   3018 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3019 		mutex_enter(rxq->rxq_lock);
   3020 		wm_rxdrain(rxq);
   3021 		mutex_exit(rxq->rxq_lock);
   3022 	}
   3023 	/* Must unlock here */
   3024 
   3025 	/* Disestablish the interrupt handler */
   3026 	for (i = 0; i < sc->sc_nintrs; i++) {
   3027 		if (sc->sc_ihs[i] != NULL) {
   3028 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3029 			sc->sc_ihs[i] = NULL;
   3030 		}
   3031 	}
   3032 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3033 
   3034 	wm_free_txrx_queues(sc);
   3035 
   3036 	/* Unmap the registers */
   3037 	if (sc->sc_ss) {
   3038 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3039 		sc->sc_ss = 0;
   3040 	}
   3041 	if (sc->sc_ios) {
   3042 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3043 		sc->sc_ios = 0;
   3044 	}
   3045 	if (sc->sc_flashs) {
   3046 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3047 		sc->sc_flashs = 0;
   3048 	}
   3049 
   3050 	if (sc->sc_core_lock)
   3051 		mutex_obj_free(sc->sc_core_lock);
   3052 	if (sc->sc_ich_phymtx)
   3053 		mutex_obj_free(sc->sc_ich_phymtx);
   3054 	if (sc->sc_ich_nvmmtx)
   3055 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3056 
   3057 	return 0;
   3058 }
   3059 
   3060 static bool
   3061 wm_suspend(device_t self, const pmf_qual_t *qual)
   3062 {
   3063 	struct wm_softc *sc = device_private(self);
   3064 
   3065 	wm_release_manageability(sc);
   3066 	wm_release_hw_control(sc);
   3067 	wm_enable_wakeup(sc);
   3068 
   3069 	return true;
   3070 }
   3071 
   3072 static bool
   3073 wm_resume(device_t self, const pmf_qual_t *qual)
   3074 {
   3075 	struct wm_softc *sc = device_private(self);
   3076 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3077 	pcireg_t reg;
   3078 	char buf[256];
   3079 
   3080 	reg = CSR_READ(sc, WMREG_WUS);
   3081 	if (reg != 0) {
   3082 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3083 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3084 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3085 	}
   3086 
   3087 	if (sc->sc_type >= WM_T_PCH2)
   3088 		wm_resume_workarounds_pchlan(sc);
   3089 	if ((ifp->if_flags & IFF_UP) == 0) {
   3090 		wm_reset(sc);
   3091 		/* Non-AMT based hardware can now take control from firmware */
   3092 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3093 			wm_get_hw_control(sc);
   3094 		wm_init_manageability(sc);
   3095 	} else {
   3096 		/*
   3097 		 * We called pmf_class_network_register(), so if_init() is
   3098 		 * automatically called when IFF_UP. wm_reset(),
   3099 		 * wm_get_hw_control() and wm_init_manageability() are called
   3100 		 * via wm_init().
   3101 		 */
   3102 	}
   3103 
   3104 	return true;
   3105 }
   3106 
   3107 /*
    3108  * wm_watchdog:
    3109  *
    3110  *	Watchdog timer handler, called from wm_tick().
   3111  */
   3112 static void
   3113 wm_watchdog(struct ifnet *ifp)
   3114 {
   3115 	int qid;
   3116 	struct wm_softc *sc = ifp->if_softc;
    3117 	uint16_t hang_queue = 0; /* Max number of queues is the 82576's 16. */
   3118 
   3119 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3120 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3121 
   3122 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3123 	}
   3124 
   3125 	/*
    3126 	 * If any queue has hung up, reset the interface.
   3127 	 */
   3128 	if (hang_queue != 0) {
   3129 		(void) wm_init(ifp);
   3130 
   3131 		/*
    3132 		 * There is still some upper layer processing that calls
    3133 		 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
   3134 		 */
   3135 		/* Try to get more packets going. */
   3136 		ifp->if_start(ifp);
   3137 	}
   3138 }
   3139 
   3140 
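         /*
          * wm_watchdog_txq:
          *
          *	Check one Tx queue for a transmit timeout, taking the queue lock.
          */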
   3141 static void
   3142 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3143 {
   3144 
   3145 	mutex_enter(txq->txq_lock);
   3146 	if (txq->txq_sending &&
   3147 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3148 		wm_watchdog_txq_locked(ifp, txq, hang);
   3149 	}
   3150 	mutex_exit(txq->txq_lock);
   3151 }
   3152 
   3153 static void
   3154 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3155     uint16_t *hang)
   3156 {
   3157 	struct wm_softc *sc = ifp->if_softc;
   3158 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3159 
   3160 	KASSERT(mutex_owned(txq->txq_lock));
   3161 
   3162 	/*
   3163 	 * Since we're using delayed interrupts, sweep up
   3164 	 * before we report an error.
   3165 	 */
   3166 	wm_txeof(txq, UINT_MAX);
   3167 
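         	/* Still pending after the sweep: flag this queue in the hang mask. */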
   3168 	if (txq->txq_sending)
   3169 		*hang |= __BIT(wmq->wmq_id);
   3170 
   3171 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3172 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3173 		    device_xname(sc->sc_dev));
   3174 	} else {
   3175 #ifdef WM_DEBUG
   3176 		int i, j;
   3177 		struct wm_txsoft *txs;
   3178 #endif
   3179 		log(LOG_ERR,
   3180 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3181 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3182 		    txq->txq_next);
   3183 		ifp->if_oerrors++;
   3184 #ifdef WM_DEBUG
   3185 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3186 		    i = WM_NEXTTXS(txq, i)) {
   3187 		    txs = &txq->txq_soft[i];
   3188 		    printf("txs %d tx %d -> %d\n",
   3189 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3190 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3191 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3192 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3193 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3194 				    printf("\t %#08x%08x\n",
   3195 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3196 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3197 			    } else {
   3198 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3199 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3200 					txq->txq_descs[j].wtx_addr.wa_low);
   3201 				    printf("\t %#04x%02x%02x%08x\n",
   3202 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3203 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3204 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3205 					txq->txq_descs[j].wtx_cmdlen);
   3206 			    }
   3207 			if (j == txs->txs_lastdesc)
   3208 				break;
   3209 			}
   3210 		}
   3211 #endif
   3212 	}
   3213 }
   3214 
   3215 /*
   3216  * wm_tick:
   3217  *
   3218  *	One second timer, used to check link status, sweep up
   3219  *	completed transmit jobs, etc.
   3220  */
   3221 static void
   3222 wm_tick(void *arg)
   3223 {
   3224 	struct wm_softc *sc = arg;
   3225 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3226 #ifndef WM_MPSAFE
   3227 	int s = splnet();
   3228 #endif
   3229 
   3230 	WM_CORE_LOCK(sc);
   3231 
   3232 	if (sc->sc_core_stopping) {
   3233 		WM_CORE_UNLOCK(sc);
   3234 #ifndef WM_MPSAFE
   3235 		splx(s);
   3236 #endif
   3237 		return;
   3238 	}
   3239 
   3240 	if (sc->sc_type >= WM_T_82542_2_1) {
   3241 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3242 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3243 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3244 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3245 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3246 	}
   3247 
   3248 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3249 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3250 	    + CSR_READ(sc, WMREG_CRCERRS)
   3251 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3252 	    + CSR_READ(sc, WMREG_SYMERRC)
   3253 	    + CSR_READ(sc, WMREG_RXERRC)
   3254 	    + CSR_READ(sc, WMREG_SEC)
   3255 	    + CSR_READ(sc, WMREG_CEXTERR)
   3256 	    + CSR_READ(sc, WMREG_RLEC);
   3257 	/*
    3258 	 * WMREG_RNBC is incremented when there are no available buffers
    3259 	 * in host memory.  It does not count dropped packets, because
    3260 	 * the Ethernet controller can still receive packets in that case
    3261 	 * if there is space in the PHY's FIFO.
    3262 	 *
    3263 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3264 	 * instead of if_iqdrops.
   3265 	 */
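         	/*
         	 * A minimal sketch (hypothetical counter name sc_ev_rnbc) of
         	 * counting RNBC with a dedicated event counter instead:
         	 *
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         	 *
         	 * with the counter attached via evcnt_attach_dynamic() like the
         	 * other counters in wm_attach().
         	 */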
   3266 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3267 
   3268 	if (sc->sc_flags & WM_F_HAS_MII)
   3269 		mii_tick(&sc->sc_mii);
   3270 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3271 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3272 		wm_serdes_tick(sc);
   3273 	else
   3274 		wm_tbi_tick(sc);
   3275 
   3276 	WM_CORE_UNLOCK(sc);
   3277 
   3278 	wm_watchdog(ifp);
   3279 
   3280 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3281 }
   3282 
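         /*
          * wm_ifflags_cb:
          *
          *	Handle if_flags and ec_capenable changes; return ENETRESET when
          *	the interface needs a full reinit.
          */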
   3283 static int
   3284 wm_ifflags_cb(struct ethercom *ec)
   3285 {
   3286 	struct ifnet *ifp = &ec->ec_if;
   3287 	struct wm_softc *sc = ifp->if_softc;
   3288 	int iffchange, ecchange;
   3289 	bool needreset = false;
   3290 	int rc = 0;
   3291 
   3292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3293 		device_xname(sc->sc_dev), __func__));
   3294 
   3295 	WM_CORE_LOCK(sc);
   3296 
   3297 	/*
   3298 	 * Check for if_flags.
    3299 	 * The main purpose is to prevent link down when opening bpf.
   3300 	 */
   3301 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3302 	sc->sc_if_flags = ifp->if_flags;
   3303 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3304 		needreset = true;
   3305 		goto ec;
   3306 	}
   3307 
   3308 	/* iff related updates */
   3309 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3310 		wm_set_filter(sc);
   3311 
   3312 	wm_set_vlan(sc);
   3313 
   3314 ec:
   3315 	/* Check for ec_capenable. */
   3316 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3317 	sc->sc_ec_capenable = ec->ec_capenable;
   3318 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3319 		needreset = true;
   3320 		goto out;
   3321 	}
   3322 
   3323 	/* ec related updates */
   3324 	wm_set_eee(sc);
   3325 
   3326 out:
   3327 	if (needreset)
   3328 		rc = ENETRESET;
   3329 	WM_CORE_UNLOCK(sc);
   3330 
   3331 	return rc;
   3332 }
   3333 
   3334 /*
   3335  * wm_ioctl:		[ifnet interface function]
   3336  *
   3337  *	Handle control requests from the operator.
   3338  */
   3339 static int
   3340 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3341 {
   3342 	struct wm_softc *sc = ifp->if_softc;
   3343 	struct ifreq *ifr = (struct ifreq *) data;
   3344 	struct ifaddr *ifa = (struct ifaddr *)data;
   3345 	struct sockaddr_dl *sdl;
   3346 	int s, error;
   3347 
   3348 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3349 		device_xname(sc->sc_dev), __func__));
   3350 
   3351 #ifndef WM_MPSAFE
   3352 	s = splnet();
   3353 #endif
   3354 	switch (cmd) {
   3355 	case SIOCSIFMEDIA:
   3356 	case SIOCGIFMEDIA:
   3357 		WM_CORE_LOCK(sc);
   3358 		/* Flow control requires full-duplex mode. */
   3359 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3360 		    (ifr->ifr_media & IFM_FDX) == 0)
   3361 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3362 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3363 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3364 				/* We can do both TXPAUSE and RXPAUSE. */
   3365 				ifr->ifr_media |=
   3366 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3367 			}
   3368 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3369 		}
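         		/*
         		 * For example (illustrative), a fixed-media request like
         		 * "ifconfig wm0 media 1000baseT mediaopt
         		 * full-duplex,flowcontrol" has IFM_FLOW expanded to
         		 * txpause,rxpause above before ifmedia_ioctl() runs.
         		 */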
   3370 		WM_CORE_UNLOCK(sc);
   3371 #ifdef WM_MPSAFE
   3372 		s = splnet();
   3373 #endif
   3374 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3375 #ifdef WM_MPSAFE
   3376 		splx(s);
   3377 #endif
   3378 		break;
   3379 	case SIOCINITIFADDR:
   3380 		WM_CORE_LOCK(sc);
   3381 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3382 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3383 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3384 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3385 			/* unicast address is first multicast entry */
   3386 			wm_set_filter(sc);
   3387 			error = 0;
   3388 			WM_CORE_UNLOCK(sc);
   3389 			break;
   3390 		}
   3391 		WM_CORE_UNLOCK(sc);
   3392 		/*FALLTHROUGH*/
   3393 	default:
   3394 #ifdef WM_MPSAFE
   3395 		s = splnet();
   3396 #endif
    3397 		/* It may call wm_start(), so don't hold the core lock here */
   3398 		error = ether_ioctl(ifp, cmd, data);
   3399 #ifdef WM_MPSAFE
   3400 		splx(s);
   3401 #endif
   3402 		if (error != ENETRESET)
   3403 			break;
   3404 
   3405 		error = 0;
   3406 
   3407 		if (cmd == SIOCSIFCAP)
   3408 			error = (*ifp->if_init)(ifp);
   3409 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3410 			;
   3411 		else if (ifp->if_flags & IFF_RUNNING) {
   3412 			/*
   3413 			 * Multicast list has changed; set the hardware filter
   3414 			 * accordingly.
   3415 			 */
   3416 			WM_CORE_LOCK(sc);
   3417 			wm_set_filter(sc);
   3418 			WM_CORE_UNLOCK(sc);
   3419 		}
   3420 		break;
   3421 	}
   3422 
   3423 #ifndef WM_MPSAFE
   3424 	splx(s);
   3425 #endif
   3426 	return error;
   3427 }
   3428 
   3429 /* MAC address related */
   3430 
   3431 /*
    3432  * Get the offset of the MAC address and return it.
    3433  * If an error occurs, use offset 0.
   3434  */
   3435 static uint16_t
   3436 wm_check_alt_mac_addr(struct wm_softc *sc)
   3437 {
   3438 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3439 	uint16_t offset = NVM_OFF_MACADDR;
   3440 
   3441 	/* Try to read alternative MAC address pointer */
   3442 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3443 		return 0;
   3444 
    3445 	/* Check whether the pointer is valid. */
   3446 	if ((offset == 0x0000) || (offset == 0xffff))
   3447 		return 0;
   3448 
   3449 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3450 	/*
    3451 	 * Check whether the alternative MAC address is valid.
    3452 	 * Some cards have a non-0xffff pointer but don't actually use
    3453 	 * an alternative MAC address.
    3454 	 *
    3455 	 * A valid unicast address has the multicast (group) bit clear.
   3456 	 */
   3457 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3458 		if (((myea[0] & 0xff) & 0x01) == 0)
   3459 			return offset; /* Found */
   3460 
   3461 	/* Not found */
   3462 	return 0;
   3463 }
   3464 
   3465 static int
   3466 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3467 {
   3468 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3469 	uint16_t offset = NVM_OFF_MACADDR;
   3470 	int do_invert = 0;
   3471 
   3472 	switch (sc->sc_type) {
   3473 	case WM_T_82580:
   3474 	case WM_T_I350:
   3475 	case WM_T_I354:
   3476 		/* EEPROM Top Level Partitioning */
   3477 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3478 		break;
   3479 	case WM_T_82571:
   3480 	case WM_T_82575:
   3481 	case WM_T_82576:
   3482 	case WM_T_80003:
   3483 	case WM_T_I210:
   3484 	case WM_T_I211:
   3485 		offset = wm_check_alt_mac_addr(sc);
   3486 		if (offset == 0)
   3487 			if ((sc->sc_funcid & 0x01) == 1)
   3488 				do_invert = 1;
   3489 		break;
   3490 	default:
   3491 		if ((sc->sc_funcid & 0x01) == 1)
   3492 			do_invert = 1;
   3493 		break;
   3494 	}
   3495 
   3496 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3497 		goto bad;
   3498 
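         	/*
         	 * NVM words are little-endian; e.g. (illustrative values) the
         	 * words { 0x1100, 0x3322, 0x5544 } unpack to 00:11:22:33:44:55.
         	 */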
   3499 	enaddr[0] = myea[0] & 0xff;
   3500 	enaddr[1] = myea[0] >> 8;
   3501 	enaddr[2] = myea[1] & 0xff;
   3502 	enaddr[3] = myea[1] >> 8;
   3503 	enaddr[4] = myea[2] & 0xff;
   3504 	enaddr[5] = myea[2] >> 8;
   3505 
   3506 	/*
   3507 	 * Toggle the LSB of the MAC address on the second port
   3508 	 * of some dual port cards.
   3509 	 */
   3510 	if (do_invert != 0)
   3511 		enaddr[5] ^= 1;
   3512 
   3513 	return 0;
   3514 
   3515  bad:
   3516 	return -1;
   3517 }
   3518 
   3519 /*
   3520  * wm_set_ral:
   3521  *
    3522  *	Set an entry in the receive address list.
   3523  */
   3524 static void
   3525 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3526 {
   3527 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3528 	uint32_t wlock_mac;
   3529 	int rv;
   3530 
   3531 	if (enaddr != NULL) {
   3532 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3533 		    (enaddr[3] << 24);
   3534 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3535 		ral_hi |= RAL_AV;
   3536 	} else {
   3537 		ral_lo = 0;
   3538 		ral_hi = 0;
   3539 	}
   3540 
   3541 	switch (sc->sc_type) {
   3542 	case WM_T_82542_2_0:
   3543 	case WM_T_82542_2_1:
   3544 	case WM_T_82543:
   3545 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3546 		CSR_WRITE_FLUSH(sc);
   3547 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3548 		CSR_WRITE_FLUSH(sc);
   3549 		break;
   3550 	case WM_T_PCH2:
   3551 	case WM_T_PCH_LPT:
   3552 	case WM_T_PCH_SPT:
   3553 	case WM_T_PCH_CNP:
   3554 		if (idx == 0) {
   3555 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3556 			CSR_WRITE_FLUSH(sc);
   3557 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3558 			CSR_WRITE_FLUSH(sc);
   3559 			return;
   3560 		}
   3561 		if (sc->sc_type != WM_T_PCH2) {
   3562 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3563 			    FWSM_WLOCK_MAC);
   3564 			addrl = WMREG_SHRAL(idx - 1);
   3565 			addrh = WMREG_SHRAH(idx - 1);
   3566 		} else {
   3567 			wlock_mac = 0;
   3568 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3569 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3570 		}
   3571 
   3572 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3573 			rv = wm_get_swflag_ich8lan(sc);
   3574 			if (rv != 0)
   3575 				return;
   3576 			CSR_WRITE(sc, addrl, ral_lo);
   3577 			CSR_WRITE_FLUSH(sc);
   3578 			CSR_WRITE(sc, addrh, ral_hi);
   3579 			CSR_WRITE_FLUSH(sc);
   3580 			wm_put_swflag_ich8lan(sc);
   3581 		}
   3582 
   3583 		break;
   3584 	default:
   3585 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3586 		CSR_WRITE_FLUSH(sc);
   3587 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3588 		CSR_WRITE_FLUSH(sc);
   3589 		break;
   3590 	}
   3591 }
   3592 
   3593 /*
   3594  * wm_mchash:
   3595  *
   3596  *	Compute the hash of the multicast address for the 4096-bit
   3597  *	multicast filter.
   3598  */
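         /*
          * For example (illustrative): with sc_mchash_type 0 on non-ICH parts,
          * an address ending in ee:ff hashes to
          * ((0xee >> 4) | (0xff << 4)) & 0xfff = 0xffe.
          */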
   3599 static uint32_t
   3600 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3601 {
   3602 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3603 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3604 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3605 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3606 	uint32_t hash;
   3607 
   3608 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3609 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3610 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3611 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3612 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3613 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3614 		return (hash & 0x3ff);
   3615 	}
   3616 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3617 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3618 
   3619 	return (hash & 0xfff);
   3620 }
   3621 
    3622 /*
    3623  * wm_rar_count:
    3624  *	Return the number of entries in the receive address list.
    3625  */
   3626 static int
   3627 wm_rar_count(struct wm_softc *sc)
   3628 {
   3629 	int size;
   3630 
   3631 	switch (sc->sc_type) {
   3632 	case WM_T_ICH8:
    3633 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3634 		break;
   3635 	case WM_T_ICH9:
   3636 	case WM_T_ICH10:
   3637 	case WM_T_PCH:
   3638 		size = WM_RAL_TABSIZE_ICH8;
   3639 		break;
   3640 	case WM_T_PCH2:
   3641 		size = WM_RAL_TABSIZE_PCH2;
   3642 		break;
   3643 	case WM_T_PCH_LPT:
   3644 	case WM_T_PCH_SPT:
   3645 	case WM_T_PCH_CNP:
   3646 		size = WM_RAL_TABSIZE_PCH_LPT;
   3647 		break;
   3648 	case WM_T_82575:
   3649 	case WM_T_I210:
   3650 	case WM_T_I211:
   3651 		size = WM_RAL_TABSIZE_82575;
   3652 		break;
   3653 	case WM_T_82576:
   3654 	case WM_T_82580:
   3655 		size = WM_RAL_TABSIZE_82576;
   3656 		break;
   3657 	case WM_T_I350:
   3658 	case WM_T_I354:
   3659 		size = WM_RAL_TABSIZE_I350;
   3660 		break;
   3661 	default:
   3662 		size = WM_RAL_TABSIZE;
   3663 	}
   3664 
   3665 	return size;
   3666 }
   3667 
   3668 /*
   3669  * wm_set_filter:
   3670  *
   3671  *	Set up the receive filter.
   3672  */
   3673 static void
   3674 wm_set_filter(struct wm_softc *sc)
   3675 {
   3676 	struct ethercom *ec = &sc->sc_ethercom;
   3677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3678 	struct ether_multi *enm;
   3679 	struct ether_multistep step;
   3680 	bus_addr_t mta_reg;
   3681 	uint32_t hash, reg, bit;
   3682 	int i, size, ralmax;
   3683 
   3684 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3685 		device_xname(sc->sc_dev), __func__));
   3686 
   3687 	if (sc->sc_type >= WM_T_82544)
   3688 		mta_reg = WMREG_CORDOVA_MTA;
   3689 	else
   3690 		mta_reg = WMREG_MTA;
   3691 
   3692 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3693 
   3694 	if (ifp->if_flags & IFF_BROADCAST)
   3695 		sc->sc_rctl |= RCTL_BAM;
   3696 	if (ifp->if_flags & IFF_PROMISC) {
   3697 		sc->sc_rctl |= RCTL_UPE;
   3698 		goto allmulti;
   3699 	}
   3700 
   3701 	/*
   3702 	 * Set the station address in the first RAL slot, and
   3703 	 * clear the remaining slots.
   3704 	 */
   3705 	size = wm_rar_count(sc);
   3706 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3707 
   3708 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3709 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3710 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3711 		switch (i) {
   3712 		case 0:
   3713 			/* We can use all entries */
   3714 			ralmax = size;
   3715 			break;
   3716 		case 1:
   3717 			/* Only RAR[0] */
   3718 			ralmax = 1;
   3719 			break;
   3720 		default:
   3721 			/* available SHRA + RAR[0] */
   3722 			ralmax = i + 1;
   3723 		}
   3724 	} else
   3725 		ralmax = size;
   3726 	for (i = 1; i < size; i++) {
   3727 		if (i < ralmax)
   3728 			wm_set_ral(sc, NULL, i);
   3729 	}
   3730 
   3731 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3732 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3733 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3734 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3735 		size = WM_ICH8_MC_TABSIZE;
   3736 	else
   3737 		size = WM_MC_TABSIZE;
   3738 	/* Clear out the multicast table. */
   3739 	for (i = 0; i < size; i++) {
   3740 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3741 		CSR_WRITE_FLUSH(sc);
   3742 	}
   3743 
   3744 	ETHER_LOCK(ec);
   3745 	ETHER_FIRST_MULTI(step, ec, enm);
   3746 	while (enm != NULL) {
   3747 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3748 			ETHER_UNLOCK(ec);
   3749 			/*
   3750 			 * We must listen to a range of multicast addresses.
   3751 			 * For now, just accept all multicasts, rather than
   3752 			 * trying to set only those filter bits needed to match
   3753 			 * the range.  (At this time, the only use of address
   3754 			 * ranges is for IP multicast routing, for which the
   3755 			 * range is big enough to require all bits set.)
   3756 			 */
   3757 			goto allmulti;
   3758 		}
   3759 
   3760 		hash = wm_mchash(sc, enm->enm_addrlo);
   3761 
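         		/*
         		 * The upper bits of the hash select the 32-bit MTA
         		 * register; the low 5 bits select the bit within it.
         		 */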
   3762 		reg = (hash >> 5);
   3763 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3764 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3765 		    || (sc->sc_type == WM_T_PCH2)
   3766 		    || (sc->sc_type == WM_T_PCH_LPT)
   3767 		    || (sc->sc_type == WM_T_PCH_SPT)
   3768 		    || (sc->sc_type == WM_T_PCH_CNP))
   3769 			reg &= 0x1f;
   3770 		else
   3771 			reg &= 0x7f;
   3772 		bit = hash & 0x1f;
   3773 
   3774 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3775 		hash |= 1U << bit;
   3776 
   3777 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3778 			/*
    3779 			 * 82544 Errata 9: Certain registers cannot be written
   3780 			 * with particular alignments in PCI-X bus operation
   3781 			 * (FCAH, MTA and VFTA).
   3782 			 */
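         			/*
         			 * Read back the adjacent even-numbered entry and
         			 * rewrite it after updating this one.
         			 */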
   3783 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3784 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3785 			CSR_WRITE_FLUSH(sc);
   3786 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3787 			CSR_WRITE_FLUSH(sc);
   3788 		} else {
   3789 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3790 			CSR_WRITE_FLUSH(sc);
   3791 		}
   3792 
   3793 		ETHER_NEXT_MULTI(step, enm);
   3794 	}
   3795 	ETHER_UNLOCK(ec);
   3796 
   3797 	ifp->if_flags &= ~IFF_ALLMULTI;
   3798 	goto setit;
   3799 
   3800  allmulti:
   3801 	ifp->if_flags |= IFF_ALLMULTI;
   3802 	sc->sc_rctl |= RCTL_MPE;
   3803 
   3804  setit:
   3805 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3806 }
   3807 
   3808 /* Reset and init related */
   3809 
   3810 static void
   3811 wm_set_vlan(struct wm_softc *sc)
   3812 {
   3813 
   3814 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3815 		device_xname(sc->sc_dev), __func__));
   3816 
   3817 	/* Deal with VLAN enables. */
   3818 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3819 		sc->sc_ctrl |= CTRL_VME;
   3820 	else
   3821 		sc->sc_ctrl &= ~CTRL_VME;
   3822 
   3823 	/* Write the control registers. */
   3824 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3825 }
   3826 
   3827 static void
   3828 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3829 {
   3830 	uint32_t gcr;
   3831 	pcireg_t ctrl2;
   3832 
   3833 	gcr = CSR_READ(sc, WMREG_GCR);
   3834 
    3835 	/* Only take action if the timeout value is at its default of 0 */
   3836 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3837 		goto out;
   3838 
   3839 	if ((gcr & GCR_CAP_VER2) == 0) {
   3840 		gcr |= GCR_CMPL_TMOUT_10MS;
   3841 		goto out;
   3842 	}
   3843 
   3844 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3845 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3846 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3847 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3848 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3849 
   3850 out:
   3851 	/* Disable completion timeout resend */
   3852 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3853 
   3854 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3855 }
   3856 
   3857 void
   3858 wm_get_auto_rd_done(struct wm_softc *sc)
   3859 {
   3860 	int i;
   3861 
    3862 	/* Wait for eeprom to reload */
   3863 	switch (sc->sc_type) {
   3864 	case WM_T_82571:
   3865 	case WM_T_82572:
   3866 	case WM_T_82573:
   3867 	case WM_T_82574:
   3868 	case WM_T_82583:
   3869 	case WM_T_82575:
   3870 	case WM_T_82576:
   3871 	case WM_T_82580:
   3872 	case WM_T_I350:
   3873 	case WM_T_I354:
   3874 	case WM_T_I210:
   3875 	case WM_T_I211:
   3876 	case WM_T_80003:
   3877 	case WM_T_ICH8:
   3878 	case WM_T_ICH9:
   3879 		for (i = 0; i < 10; i++) {
   3880 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3881 				break;
   3882 			delay(1000);
   3883 		}
   3884 		if (i == 10) {
   3885 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3886 			    "complete\n", device_xname(sc->sc_dev));
   3887 		}
   3888 		break;
   3889 	default:
   3890 		break;
   3891 	}
   3892 }
   3893 
   3894 void
   3895 wm_lan_init_done(struct wm_softc *sc)
   3896 {
   3897 	uint32_t reg = 0;
   3898 	int i;
   3899 
   3900 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3901 		device_xname(sc->sc_dev), __func__));
   3902 
   3903 	/* Wait for eeprom to reload */
   3904 	switch (sc->sc_type) {
   3905 	case WM_T_ICH10:
   3906 	case WM_T_PCH:
   3907 	case WM_T_PCH2:
   3908 	case WM_T_PCH_LPT:
   3909 	case WM_T_PCH_SPT:
   3910 	case WM_T_PCH_CNP:
   3911 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3912 			reg = CSR_READ(sc, WMREG_STATUS);
   3913 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3914 				break;
   3915 			delay(100);
   3916 		}
   3917 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3918 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3919 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3920 		}
   3921 		break;
   3922 	default:
   3923 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3924 		    __func__);
   3925 		break;
   3926 	}
   3927 
   3928 	reg &= ~STATUS_LAN_INIT_DONE;
   3929 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3930 }
   3931 
   3932 void
   3933 wm_get_cfg_done(struct wm_softc *sc)
   3934 {
   3935 	int mask;
   3936 	uint32_t reg;
   3937 	int i;
   3938 
   3939 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3940 		device_xname(sc->sc_dev), __func__));
   3941 
   3942 	/* Wait for eeprom to reload */
   3943 	switch (sc->sc_type) {
   3944 	case WM_T_82542_2_0:
   3945 	case WM_T_82542_2_1:
   3946 		/* null */
   3947 		break;
   3948 	case WM_T_82543:
   3949 	case WM_T_82544:
   3950 	case WM_T_82540:
   3951 	case WM_T_82545:
   3952 	case WM_T_82545_3:
   3953 	case WM_T_82546:
   3954 	case WM_T_82546_3:
   3955 	case WM_T_82541:
   3956 	case WM_T_82541_2:
   3957 	case WM_T_82547:
   3958 	case WM_T_82547_2:
   3959 	case WM_T_82573:
   3960 	case WM_T_82574:
   3961 	case WM_T_82583:
   3962 		/* generic */
   3963 		delay(10*1000);
   3964 		break;
   3965 	case WM_T_80003:
   3966 	case WM_T_82571:
   3967 	case WM_T_82572:
   3968 	case WM_T_82575:
   3969 	case WM_T_82576:
   3970 	case WM_T_82580:
   3971 	case WM_T_I350:
   3972 	case WM_T_I354:
   3973 	case WM_T_I210:
   3974 	case WM_T_I211:
   3975 		if (sc->sc_type == WM_T_82571) {
    3976 			/* On the 82571, all functions share the port 0 bit */
   3977 			mask = EEMNGCTL_CFGDONE_0;
   3978 		} else
   3979 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3980 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3981 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3982 				break;
   3983 			delay(1000);
   3984 		}
   3985 		if (i >= WM_PHY_CFG_TIMEOUT)
   3986 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3987 				device_xname(sc->sc_dev), __func__));
   3988 		break;
   3989 	case WM_T_ICH8:
   3990 	case WM_T_ICH9:
   3991 	case WM_T_ICH10:
   3992 	case WM_T_PCH:
   3993 	case WM_T_PCH2:
   3994 	case WM_T_PCH_LPT:
   3995 	case WM_T_PCH_SPT:
   3996 	case WM_T_PCH_CNP:
   3997 		delay(10*1000);
   3998 		if (sc->sc_type >= WM_T_ICH10)
   3999 			wm_lan_init_done(sc);
   4000 		else
   4001 			wm_get_auto_rd_done(sc);
   4002 
   4003 		/* Clear PHY Reset Asserted bit */
   4004 		reg = CSR_READ(sc, WMREG_STATUS);
   4005 		if ((reg & STATUS_PHYRA) != 0)
   4006 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4007 		break;
   4008 	default:
   4009 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4010 		    __func__);
   4011 		break;
   4012 	}
   4013 }
   4014 
   4015 int
   4016 wm_phy_post_reset(struct wm_softc *sc)
   4017 {
   4018 	device_t dev = sc->sc_dev;
   4019 	uint16_t reg;
   4020 	int rv = 0;
   4021 
   4022 	/* This function is only for ICH8 and newer. */
   4023 	if (sc->sc_type < WM_T_ICH8)
   4024 		return 0;
   4025 
   4026 	if (wm_phy_resetisblocked(sc)) {
   4027 		/* XXX */
   4028 		device_printf(dev, "PHY is blocked\n");
   4029 		return -1;
   4030 	}
   4031 
   4032 	/* Allow time for h/w to get to quiescent state after reset */
   4033 	delay(10*1000);
   4034 
   4035 	/* Perform any necessary post-reset workarounds */
   4036 	if (sc->sc_type == WM_T_PCH)
   4037 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4038 	else if (sc->sc_type == WM_T_PCH2)
   4039 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4040 	if (rv != 0)
   4041 		return rv;
   4042 
   4043 	/* Clear the host wakeup bit after lcd reset */
   4044 	if (sc->sc_type >= WM_T_PCH) {
   4045 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4046 		reg &= ~BM_WUC_HOST_WU_BIT;
   4047 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4048 	}
   4049 
   4050 	/* Configure the LCD with the extended configuration region in NVM */
   4051 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4052 		return rv;
   4053 
   4054 	/* Configure the LCD with the OEM bits in NVM */
   4055 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4056 
   4057 	if (sc->sc_type == WM_T_PCH2) {
   4058 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4059 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4060 			delay(10 * 1000);
   4061 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4062 		}
   4063 		/* Set EEE LPI Update Timer to 200usec */
   4064 		rv = sc->phy.acquire(sc);
   4065 		if (rv)
   4066 			return rv;
   4067 		rv = wm_write_emi_reg_locked(dev,
   4068 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4069 		sc->phy.release(sc);
   4070 	}
   4071 
   4072 	return rv;
   4073 }
   4074 
   4075 /* Only for PCH and newer */
   4076 static int
   4077 wm_write_smbus_addr(struct wm_softc *sc)
   4078 {
   4079 	uint32_t strap, freq;
   4080 	uint16_t phy_data;
   4081 	int rv;
   4082 
   4083 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4084 		device_xname(sc->sc_dev), __func__));
   4085 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4086 
   4087 	strap = CSR_READ(sc, WMREG_STRAP);
   4088 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4089 
   4090 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4091 	if (rv != 0)
   4092 		return -1;
   4093 
   4094 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4095 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4096 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4097 
   4098 	if (sc->sc_phytype == WMPHY_I217) {
   4099 		/* Restore SMBus frequency */
    4100 		if (freq--) {
   4101 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4102 			    | HV_SMB_ADDR_FREQ_HIGH);
   4103 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4104 			    HV_SMB_ADDR_FREQ_LOW);
   4105 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4106 			    HV_SMB_ADDR_FREQ_HIGH);
   4107 		} else
   4108 			DPRINTF(WM_DEBUG_INIT,
   4109 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4110 				device_xname(sc->sc_dev), __func__));
   4111 	}
   4112 
   4113 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4114 	    phy_data);
   4115 }
   4116 
   4117 static int
   4118 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4119 {
   4120 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4121 	uint16_t phy_page = 0;
   4122 	int rv = 0;
   4123 
   4124 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4125 		device_xname(sc->sc_dev), __func__));
   4126 
   4127 	switch (sc->sc_type) {
   4128 	case WM_T_ICH8:
   4129 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4130 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4131 			return 0;
   4132 
   4133 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4134 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4135 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4136 			break;
   4137 		}
   4138 		/* FALLTHROUGH */
   4139 	case WM_T_PCH:
   4140 	case WM_T_PCH2:
   4141 	case WM_T_PCH_LPT:
   4142 	case WM_T_PCH_SPT:
   4143 	case WM_T_PCH_CNP:
   4144 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4145 		break;
   4146 	default:
   4147 		return 0;
   4148 	}
   4149 
   4150 	if ((rv = sc->phy.acquire(sc)) != 0)
   4151 		return rv;
   4152 
   4153 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4154 	if ((reg & sw_cfg_mask) == 0)
   4155 		goto release;
   4156 
   4157 	/*
   4158 	 * Make sure HW does not configure LCD from PHY extended configuration
   4159 	 * before SW configuration
   4160 	 */
   4161 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4162 	if ((sc->sc_type < WM_T_PCH2)
   4163 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4164 		goto release;
   4165 
   4166 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4167 		device_xname(sc->sc_dev), __func__));
    4168 	/* The extended config pointer is in DWORDs; convert to NVM words. */
   4169 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4170 
   4171 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4172 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4173 	if (cnf_size == 0)
   4174 		goto release;
   4175 
   4176 	if (((sc->sc_type == WM_T_PCH)
   4177 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4178 	    || (sc->sc_type > WM_T_PCH)) {
   4179 		/*
   4180 		 * HW configures the SMBus address and LEDs when the OEM and
   4181 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4182 		 * are cleared, SW will configure them instead.
   4183 		 */
   4184 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4185 			device_xname(sc->sc_dev), __func__));
   4186 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4187 			goto release;
   4188 
   4189 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4190 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4191 		    (uint16_t)reg);
   4192 		if (rv != 0)
   4193 			goto release;
   4194 	}
   4195 
   4196 	/* Configure LCD from extended configuration region. */
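         	/*
         	 * Each entry in the region is a (data, address) word pair:
         	 * entry i occupies NVM words word_addr + i * 2 and
         	 * word_addr + i * 2 + 1.
         	 */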
   4197 	for (i = 0; i < cnf_size; i++) {
   4198 		uint16_t reg_data, reg_addr;
   4199 
   4200 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4201 			goto release;
   4202 
   4203 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4204 			goto release;
   4205 
   4206 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4207 			phy_page = reg_data;
   4208 
   4209 		reg_addr &= IGPHY_MAXREGADDR;
   4210 		reg_addr |= phy_page;
   4211 
   4212 		KASSERT(sc->phy.writereg_locked != NULL);
   4213 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4214 		    reg_data);
   4215 	}
   4216 
   4217 release:
   4218 	sc->phy.release(sc);
   4219 	return rv;
   4220 }
   4221 
   4222 /*
   4223  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4224  *  @sc:       pointer to the HW structure
   4225  *  @d0_state: boolean if entering d0 or d3 device state
   4226  *
   4227  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4228  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4229  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4230  */
   4231 int
   4232 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4233 {
   4234 	uint32_t mac_reg;
   4235 	uint16_t oem_reg;
   4236 	int rv;
   4237 
   4238 	if (sc->sc_type < WM_T_PCH)
   4239 		return 0;
   4240 
   4241 	rv = sc->phy.acquire(sc);
   4242 	if (rv != 0)
   4243 		return rv;
   4244 
   4245 	if (sc->sc_type == WM_T_PCH) {
   4246 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4247 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4248 			goto release;
   4249 	}
   4250 
   4251 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4252 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4253 		goto release;
   4254 
   4255 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4256 
   4257 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4258 	if (rv != 0)
   4259 		goto release;
   4260 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4261 
   4262 	if (d0_state) {
   4263 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4264 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4265 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4266 			oem_reg |= HV_OEM_BITS_LPLU;
   4267 	} else {
   4268 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4269 		    != 0)
   4270 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4271 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4272 		    != 0)
   4273 			oem_reg |= HV_OEM_BITS_LPLU;
   4274 	}
   4275 
   4276 	/* Set Restart auto-neg to activate the bits */
   4277 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4278 	    && (wm_phy_resetisblocked(sc) == false))
   4279 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4280 
   4281 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4282 
   4283 release:
   4284 	sc->phy.release(sc);
   4285 
   4286 	return rv;
   4287 }
   4288 
   4289 /* Init hardware bits */
   4290 void
   4291 wm_initialize_hardware_bits(struct wm_softc *sc)
   4292 {
   4293 	uint32_t tarc0, tarc1, reg;
   4294 
   4295 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4296 		device_xname(sc->sc_dev), __func__));
   4297 
   4298 	/* For 82571 variant, 80003 and ICHs */
   4299 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4300 	    || (sc->sc_type >= WM_T_80003)) {
   4301 
   4302 		/* Transmit Descriptor Control 0 */
   4303 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4304 		reg |= TXDCTL_COUNT_DESC;
   4305 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4306 
   4307 		/* Transmit Descriptor Control 1 */
   4308 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4309 		reg |= TXDCTL_COUNT_DESC;
   4310 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4311 
   4312 		/* TARC0 */
   4313 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4314 		switch (sc->sc_type) {
   4315 		case WM_T_82571:
   4316 		case WM_T_82572:
   4317 		case WM_T_82573:
   4318 		case WM_T_82574:
   4319 		case WM_T_82583:
   4320 		case WM_T_80003:
   4321 			/* Clear bits 30..27 */
   4322 			tarc0 &= ~__BITS(30, 27);
   4323 			break;
   4324 		default:
   4325 			break;
   4326 		}
   4327 
   4328 		switch (sc->sc_type) {
   4329 		case WM_T_82571:
   4330 		case WM_T_82572:
   4331 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4332 
   4333 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4334 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4335 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4336 			/* 8257[12] Errata No.7 */
    4337 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4338 
   4339 			/* TARC1 bit 28 */
   4340 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4341 				tarc1 &= ~__BIT(28);
   4342 			else
   4343 				tarc1 |= __BIT(28);
   4344 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4345 
   4346 			/*
   4347 			 * 8257[12] Errata No.13
    4348 			 * Disable Dynamic Clock Gating.
   4349 			 */
   4350 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4351 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4352 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4353 			break;
   4354 		case WM_T_82573:
   4355 		case WM_T_82574:
   4356 		case WM_T_82583:
   4357 			if ((sc->sc_type == WM_T_82574)
   4358 			    || (sc->sc_type == WM_T_82583))
   4359 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4360 
   4361 			/* Extended Device Control */
   4362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4363 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4364 			reg |= __BIT(22);	/* Set bit 22 */
   4365 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4366 
   4367 			/* Device Control */
   4368 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4369 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4370 
   4371 			/* PCIe Control Register */
   4372 			/*
   4373 			 * 82573 Errata (unknown).
   4374 			 *
   4375 			 * 82574 Errata 25 and 82583 Errata 12
   4376 			 * "Dropped Rx Packets":
    4377 			 *   NVM image 2.1.4 and newer do not have this bug.
   4378 			 */
   4379 			reg = CSR_READ(sc, WMREG_GCR);
   4380 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4381 			CSR_WRITE(sc, WMREG_GCR, reg);
   4382 
   4383 			if ((sc->sc_type == WM_T_82574)
   4384 			    || (sc->sc_type == WM_T_82583)) {
   4385 				/*
   4386 				 * Document says this bit must be set for
   4387 				 * proper operation.
   4388 				 */
   4389 				reg = CSR_READ(sc, WMREG_GCR);
   4390 				reg |= __BIT(22);
   4391 				CSR_WRITE(sc, WMREG_GCR, reg);
   4392 
   4393 				/*
    4394 				 * Apply a workaround for the hardware erratum
    4395 				 * documented in the errata docs.  It fixes an
    4396 				 * issue where error-prone or unreliable PCIe
    4397 				 * completions occur, particularly with ASPM
    4398 				 * enabled.  Without the fix, the issue can
    4399 				 * cause Tx timeouts.
   4400 				 */
   4401 				reg = CSR_READ(sc, WMREG_GCR2);
   4402 				reg |= __BIT(0);
   4403 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4404 			}
   4405 			break;
   4406 		case WM_T_80003:
   4407 			/* TARC0 */
   4408 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4409 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4410 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4411 
   4412 			/* TARC1 bit 28 */
   4413 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4414 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4415 				tarc1 &= ~__BIT(28);
   4416 			else
   4417 				tarc1 |= __BIT(28);
   4418 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4419 			break;
   4420 		case WM_T_ICH8:
   4421 		case WM_T_ICH9:
   4422 		case WM_T_ICH10:
   4423 		case WM_T_PCH:
   4424 		case WM_T_PCH2:
   4425 		case WM_T_PCH_LPT:
   4426 		case WM_T_PCH_SPT:
   4427 		case WM_T_PCH_CNP:
   4428 			/* TARC0 */
   4429 			if (sc->sc_type == WM_T_ICH8) {
   4430 				/* Set TARC0 bits 29 and 28 */
   4431 				tarc0 |= __BITS(29, 28);
   4432 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4433 				tarc0 |= __BIT(29);
   4434 				/*
   4435 				 *  Drop bit 28. From Linux.
   4436 				 * See I218/I219 spec update
   4437 				 * "5. Buffer Overrun While the I219 is
   4438 				 * Processing DMA Transactions"
   4439 				 */
   4440 				tarc0 &= ~__BIT(28);
   4441 			}
   4442 			/* Set TARC0 bits 23,24,26,27 */
   4443 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4444 
   4445 			/* CTRL_EXT */
   4446 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4447 			reg |= __BIT(22);	/* Set bit 22 */
   4448 			/*
   4449 			 * Enable PHY low-power state when MAC is at D3
   4450 			 * w/o WoL
   4451 			 */
   4452 			if (sc->sc_type >= WM_T_PCH)
   4453 				reg |= CTRL_EXT_PHYPDEN;
   4454 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4455 
   4456 			/* TARC1 */
   4457 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4458 			/* bit 28 */
   4459 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4460 				tarc1 &= ~__BIT(28);
   4461 			else
   4462 				tarc1 |= __BIT(28);
   4463 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4464 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4465 
   4466 			/* Device Status */
   4467 			if (sc->sc_type == WM_T_ICH8) {
   4468 				reg = CSR_READ(sc, WMREG_STATUS);
   4469 				reg &= ~__BIT(31);
   4470 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4471 
   4472 			}
   4473 
   4474 			/* IOSFPC */
   4475 			if (sc->sc_type == WM_T_PCH_SPT) {
   4476 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4477 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4478 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4479 			}
   4480 			/*
    4481 			 * To work around a descriptor data corruption issue
    4482 			 * seen during NFS v2 UDP traffic, just disable the
    4483 			 * NFS filtering capability.
   4484 			 */
   4485 			reg = CSR_READ(sc, WMREG_RFCTL);
   4486 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4487 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4488 			break;
   4489 		default:
   4490 			break;
   4491 		}
   4492 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4493 
   4494 		switch (sc->sc_type) {
   4495 		/*
   4496 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4497 		 * Avoid RSS Hash Value bug.
   4498 		 */
   4499 		case WM_T_82571:
   4500 		case WM_T_82572:
   4501 		case WM_T_82573:
   4502 		case WM_T_80003:
   4503 		case WM_T_ICH8:
   4504 			reg = CSR_READ(sc, WMREG_RFCTL);
   4505 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4506 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4507 			break;
   4508 		case WM_T_82574:
    4509 			/* Use extended Rx descriptors. */
   4510 			reg = CSR_READ(sc, WMREG_RFCTL);
   4511 			reg |= WMREG_RFCTL_EXSTEN;
   4512 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4513 			break;
   4514 		default:
   4515 			break;
   4516 		}
   4517 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4518 		/*
   4519 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4520 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4521 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4522 		 * Correctly by the Device"
   4523 		 *
   4524 		 * I354(C2000) Errata AVR53:
   4525 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4526 		 * Hang"
   4527 		 */
   4528 		reg = CSR_READ(sc, WMREG_RFCTL);
   4529 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4530 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4531 	}
   4532 }
   4533 
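/*
 * A descriptive note on the helper below: it maps the encoded RXPBS size
 * field through the 82580 lookup table; values outside the table fall
 * back to 0.
 */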
   4534 static uint32_t
   4535 wm_rxpbs_adjust_82580(uint32_t val)
   4536 {
   4537 	uint32_t rv = 0;
   4538 
   4539 	if (val < __arraycount(wm_82580_rxpbs_table))
   4540 		rv = wm_82580_rxpbs_table[val];
   4541 
   4542 	return rv;
   4543 }
   4544 
   4545 /*
   4546  * wm_reset_phy:
   4547  *
   4548  *	generic PHY reset function.
   4549  *	Same as e1000_phy_hw_reset_generic()
   4550  */
   4551 static int
   4552 wm_reset_phy(struct wm_softc *sc)
   4553 {
   4554 	uint32_t reg;
   4555 
   4556 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4557 		device_xname(sc->sc_dev), __func__));
   4558 	if (wm_phy_resetisblocked(sc))
   4559 		return -1;
   4560 
   4561 	sc->phy.acquire(sc);
   4562 
   4563 	reg = CSR_READ(sc, WMREG_CTRL);
   4564 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4565 	CSR_WRITE_FLUSH(sc);
   4566 
   4567 	delay(sc->phy.reset_delay_us);
   4568 
   4569 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4570 	CSR_WRITE_FLUSH(sc);
   4571 
   4572 	delay(150);
   4573 
   4574 	sc->phy.release(sc);
   4575 
   4576 	wm_get_cfg_done(sc);
   4577 	wm_phy_post_reset(sc);
   4578 
   4579 	return 0;
   4580 }
   4581 
   4582 /*
    4583  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4584  * so it is enough to check sc->sc_queue[0] only.
   4585  */
   4586 static void
   4587 wm_flush_desc_rings(struct wm_softc *sc)
   4588 {
   4589 	pcireg_t preg;
   4590 	uint32_t reg;
   4591 	struct wm_txqueue *txq;
   4592 	wiseman_txdesc_t *txd;
   4593 	int nexttx;
   4594 	uint32_t rctl;
   4595 
   4596 	/* First, disable MULR fix in FEXTNVM11 */
   4597 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4598 	reg |= FEXTNVM11_DIS_MULRFIX;
   4599 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4600 
   4601 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4602 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4603 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4604 		return;
   4605 
   4606 	/* TX */
   4607 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4608 	    device_xname(sc->sc_dev), preg, reg);
   4609 	reg = CSR_READ(sc, WMREG_TCTL);
   4610 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4611 
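	/*
	 * Queue a single dummy 512-byte descriptor and advance the tail
	 * pointer so that the hardware can drain its internal Tx buffer
	 * and complete the requested flush.
	 */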
   4612 	txq = &sc->sc_queue[0].wmq_txq;
   4613 	nexttx = txq->txq_next;
   4614 	txd = &txq->txq_descs[nexttx];
   4615 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4616 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4617 	txd->wtx_fields.wtxu_status = 0;
   4618 	txd->wtx_fields.wtxu_options = 0;
   4619 	txd->wtx_fields.wtxu_vlan = 0;
   4620 
   4621 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4622 	    BUS_SPACE_BARRIER_WRITE);
   4623 
   4624 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4625 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4626 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4627 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4628 	delay(250);
   4629 
   4630 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4631 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4632 		return;
   4633 
   4634 	/* RX */
   4635 	printf("%s: Need RX flush (reg = %08x)\n",
   4636 	    device_xname(sc->sc_dev), preg);
   4637 	rctl = CSR_READ(sc, WMREG_RCTL);
   4638 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4639 	CSR_WRITE_FLUSH(sc);
   4640 	delay(150);
   4641 
   4642 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4643 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4644 	reg &= 0xffffc000;
   4645 	/*
    4646 	 * Update the thresholds: set the prefetch threshold to 31 and
    4647 	 * the host threshold to 1, and make sure the granularity is
    4648 	 * "descriptors", not "cache lines".
   4649 	 */
   4650 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4651 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4652 
   4653 	/*
   4654 	 * momentarily enable the RX ring for the changes to take
   4655 	 * effect
   4656 	 */
   4657 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4658 	CSR_WRITE_FLUSH(sc);
   4659 	delay(150);
   4660 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4661 }
   4662 
   4663 /*
   4664  * wm_reset:
   4665  *
   4666  *	Reset the i82542 chip.
   4667  */
   4668 static void
   4669 wm_reset(struct wm_softc *sc)
   4670 {
   4671 	int phy_reset = 0;
   4672 	int i, error = 0;
   4673 	uint32_t reg;
   4674 	uint16_t kmreg;
   4675 	int rv;
   4676 
   4677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4678 		device_xname(sc->sc_dev), __func__));
   4679 	KASSERT(sc->sc_type != 0);
   4680 
   4681 	/*
   4682 	 * Allocate on-chip memory according to the MTU size.
   4683 	 * The Packet Buffer Allocation register must be written
   4684 	 * before the chip is reset.
   4685 	 */
   4686 	switch (sc->sc_type) {
   4687 	case WM_T_82547:
   4688 	case WM_T_82547_2:
   4689 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4690 		    PBA_22K : PBA_30K;
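		/*
		 * The remainder of the 40KB on-chip packet buffer above the
		 * Rx allocation (PBA_40K - sc_pba) becomes the Tx FIFO.
		 */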
   4691 		for (i = 0; i < sc->sc_nqueues; i++) {
   4692 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4693 			txq->txq_fifo_head = 0;
   4694 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4695 			txq->txq_fifo_size =
   4696 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4697 			txq->txq_fifo_stall = 0;
   4698 		}
   4699 		break;
   4700 	case WM_T_82571:
   4701 	case WM_T_82572:
   4702 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4703 	case WM_T_80003:
   4704 		sc->sc_pba = PBA_32K;
   4705 		break;
   4706 	case WM_T_82573:
   4707 		sc->sc_pba = PBA_12K;
   4708 		break;
   4709 	case WM_T_82574:
   4710 	case WM_T_82583:
   4711 		sc->sc_pba = PBA_20K;
   4712 		break;
   4713 	case WM_T_82576:
   4714 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4715 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4716 		break;
   4717 	case WM_T_82580:
   4718 	case WM_T_I350:
   4719 	case WM_T_I354:
   4720 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4721 		break;
   4722 	case WM_T_I210:
   4723 	case WM_T_I211:
   4724 		sc->sc_pba = PBA_34K;
   4725 		break;
   4726 	case WM_T_ICH8:
   4727 		/* Workaround for a bit corruption issue in FIFO memory */
   4728 		sc->sc_pba = PBA_8K;
   4729 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4730 		break;
   4731 	case WM_T_ICH9:
   4732 	case WM_T_ICH10:
   4733 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4734 		    PBA_14K : PBA_10K;
   4735 		break;
   4736 	case WM_T_PCH:
   4737 	case WM_T_PCH2:	/* XXX 14K? */
   4738 	case WM_T_PCH_LPT:
   4739 	case WM_T_PCH_SPT:
   4740 	case WM_T_PCH_CNP:
   4741 		sc->sc_pba = PBA_26K;
   4742 		break;
   4743 	default:
   4744 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4745 		    PBA_40K : PBA_48K;
   4746 		break;
   4747 	}
   4748 	/*
    4749 	 * Only old or non-multiqueue devices have the PBA register.
   4750 	 * XXX Need special handling for 82575.
   4751 	 */
   4752 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4753 	    || (sc->sc_type == WM_T_82575))
   4754 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4755 
   4756 	/* Prevent the PCI-E bus from sticking */
   4757 	if (sc->sc_flags & WM_F_PCIE) {
   4758 		int timeout = 800;
   4759 
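		/*
		 * Request GIO master disable, then poll up to
		 * 800 * 100us = 80ms for in-flight master cycles to drain.
		 */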
   4760 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4761 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4762 
   4763 		while (timeout--) {
   4764 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4765 			    == 0)
   4766 				break;
   4767 			delay(100);
   4768 		}
   4769 		if (timeout == 0)
   4770 			device_printf(sc->sc_dev,
   4771 			    "failed to disable busmastering\n");
   4772 	}
   4773 
   4774 	/* Set the completion timeout for interface */
   4775 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4776 	    || (sc->sc_type == WM_T_82580)
   4777 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4778 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4779 		wm_set_pcie_completion_timeout(sc);
   4780 
   4781 	/* Clear interrupt */
   4782 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4783 	if (wm_is_using_msix(sc)) {
   4784 		if (sc->sc_type != WM_T_82574) {
   4785 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4786 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4787 		} else
   4788 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4789 	}
   4790 
   4791 	/* Stop the transmit and receive processes. */
   4792 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4793 	sc->sc_rctl &= ~RCTL_EN;
   4794 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4795 	CSR_WRITE_FLUSH(sc);
   4796 
   4797 	/* XXX set_tbi_sbp_82543() */
   4798 
   4799 	delay(10*1000);
   4800 
   4801 	/* Must acquire the MDIO ownership before MAC reset */
   4802 	switch (sc->sc_type) {
   4803 	case WM_T_82573:
   4804 	case WM_T_82574:
   4805 	case WM_T_82583:
   4806 		error = wm_get_hw_semaphore_82573(sc);
   4807 		break;
   4808 	default:
   4809 		break;
   4810 	}
   4811 
   4812 	/*
   4813 	 * 82541 Errata 29? & 82547 Errata 28?
   4814 	 * See also the description about PHY_RST bit in CTRL register
   4815 	 * in 8254x_GBe_SDM.pdf.
   4816 	 */
   4817 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4818 		CSR_WRITE(sc, WMREG_CTRL,
   4819 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4820 		CSR_WRITE_FLUSH(sc);
   4821 		delay(5000);
   4822 	}
   4823 
   4824 	switch (sc->sc_type) {
   4825 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4826 	case WM_T_82541:
   4827 	case WM_T_82541_2:
   4828 	case WM_T_82547:
   4829 	case WM_T_82547_2:
   4830 		/*
   4831 		 * On some chipsets, a reset through a memory-mapped write
   4832 		 * cycle can cause the chip to reset before completing the
    4833 		 * write cycle. This causes a major headache that can be avoided
   4834 		 * by issuing the reset via indirect register writes through
   4835 		 * I/O space.
   4836 		 *
   4837 		 * So, if we successfully mapped the I/O BAR at attach time,
   4838 		 * use that. Otherwise, try our luck with a memory-mapped
   4839 		 * reset.
   4840 		 */
   4841 		if (sc->sc_flags & WM_F_IOH_VALID)
   4842 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4843 		else
   4844 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4845 		break;
   4846 	case WM_T_82545_3:
   4847 	case WM_T_82546_3:
   4848 		/* Use the shadow control register on these chips. */
   4849 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4850 		break;
   4851 	case WM_T_80003:
   4852 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4853 		sc->phy.acquire(sc);
   4854 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4855 		sc->phy.release(sc);
   4856 		break;
   4857 	case WM_T_ICH8:
   4858 	case WM_T_ICH9:
   4859 	case WM_T_ICH10:
   4860 	case WM_T_PCH:
   4861 	case WM_T_PCH2:
   4862 	case WM_T_PCH_LPT:
   4863 	case WM_T_PCH_SPT:
   4864 	case WM_T_PCH_CNP:
   4865 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4866 		if (wm_phy_resetisblocked(sc) == false) {
   4867 			/*
   4868 			 * Gate automatic PHY configuration by hardware on
   4869 			 * non-managed 82579
   4870 			 */
   4871 			if ((sc->sc_type == WM_T_PCH2)
   4872 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4873 				== 0))
   4874 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4875 
   4876 			reg |= CTRL_PHY_RESET;
   4877 			phy_reset = 1;
   4878 		} else
   4879 			printf("XXX reset is blocked!!!\n");
   4880 		sc->phy.acquire(sc);
   4881 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4882 		/* Don't insert a completion barrier during reset */
   4883 		delay(20*1000);
   4884 		mutex_exit(sc->sc_ich_phymtx);
   4885 		break;
   4886 	case WM_T_82580:
   4887 	case WM_T_I350:
   4888 	case WM_T_I354:
   4889 	case WM_T_I210:
   4890 	case WM_T_I211:
   4891 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4892 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4893 			CSR_WRITE_FLUSH(sc);
   4894 		delay(5000);
   4895 		break;
   4896 	case WM_T_82542_2_0:
   4897 	case WM_T_82542_2_1:
   4898 	case WM_T_82543:
   4899 	case WM_T_82540:
   4900 	case WM_T_82545:
   4901 	case WM_T_82546:
   4902 	case WM_T_82571:
   4903 	case WM_T_82572:
   4904 	case WM_T_82573:
   4905 	case WM_T_82574:
   4906 	case WM_T_82575:
   4907 	case WM_T_82576:
   4908 	case WM_T_82583:
   4909 	default:
   4910 		/* Everything else can safely use the documented method. */
   4911 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4912 		break;
   4913 	}
   4914 
   4915 	/* Must release the MDIO ownership after MAC reset */
   4916 	switch (sc->sc_type) {
   4917 	case WM_T_82573:
   4918 	case WM_T_82574:
   4919 	case WM_T_82583:
   4920 		if (error == 0)
   4921 			wm_put_hw_semaphore_82573(sc);
   4922 		break;
   4923 	default:
   4924 		break;
   4925 	}
   4926 
   4927 	/* Set Phy Config Counter to 50msec */
   4928 	if (sc->sc_type == WM_T_PCH2) {
   4929 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4930 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4931 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4932 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4933 	}
   4934 
   4935 	if (phy_reset != 0)
   4936 		wm_get_cfg_done(sc);
   4937 
   4938 	/* reload EEPROM */
   4939 	switch (sc->sc_type) {
   4940 	case WM_T_82542_2_0:
   4941 	case WM_T_82542_2_1:
   4942 	case WM_T_82543:
   4943 	case WM_T_82544:
   4944 		delay(10);
   4945 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4946 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4947 		CSR_WRITE_FLUSH(sc);
   4948 		delay(2000);
   4949 		break;
   4950 	case WM_T_82540:
   4951 	case WM_T_82545:
   4952 	case WM_T_82545_3:
   4953 	case WM_T_82546:
   4954 	case WM_T_82546_3:
   4955 		delay(5*1000);
   4956 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4957 		break;
   4958 	case WM_T_82541:
   4959 	case WM_T_82541_2:
   4960 	case WM_T_82547:
   4961 	case WM_T_82547_2:
   4962 		delay(20000);
   4963 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4964 		break;
   4965 	case WM_T_82571:
   4966 	case WM_T_82572:
   4967 	case WM_T_82573:
   4968 	case WM_T_82574:
   4969 	case WM_T_82583:
   4970 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4971 			delay(10);
   4972 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4973 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4974 			CSR_WRITE_FLUSH(sc);
   4975 		}
   4976 		/* check EECD_EE_AUTORD */
   4977 		wm_get_auto_rd_done(sc);
   4978 		/*
   4979 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4980 		 * is set.
   4981 		 */
   4982 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4983 		    || (sc->sc_type == WM_T_82583))
   4984 			delay(25*1000);
   4985 		break;
   4986 	case WM_T_82575:
   4987 	case WM_T_82576:
   4988 	case WM_T_82580:
   4989 	case WM_T_I350:
   4990 	case WM_T_I354:
   4991 	case WM_T_I210:
   4992 	case WM_T_I211:
   4993 	case WM_T_80003:
   4994 		/* check EECD_EE_AUTORD */
   4995 		wm_get_auto_rd_done(sc);
   4996 		break;
   4997 	case WM_T_ICH8:
   4998 	case WM_T_ICH9:
   4999 	case WM_T_ICH10:
   5000 	case WM_T_PCH:
   5001 	case WM_T_PCH2:
   5002 	case WM_T_PCH_LPT:
   5003 	case WM_T_PCH_SPT:
   5004 	case WM_T_PCH_CNP:
   5005 		break;
   5006 	default:
   5007 		panic("%s: unknown type\n", __func__);
   5008 	}
   5009 
   5010 	/* Check whether EEPROM is present or not */
   5011 	switch (sc->sc_type) {
   5012 	case WM_T_82575:
   5013 	case WM_T_82576:
   5014 	case WM_T_82580:
   5015 	case WM_T_I350:
   5016 	case WM_T_I354:
   5017 	case WM_T_ICH8:
   5018 	case WM_T_ICH9:
   5019 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5020 			/* Not found */
   5021 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5022 			if (sc->sc_type == WM_T_82575)
   5023 				wm_reset_init_script_82575(sc);
   5024 		}
   5025 		break;
   5026 	default:
   5027 		break;
   5028 	}
   5029 
   5030 	if (phy_reset != 0)
   5031 		wm_phy_post_reset(sc);
   5032 
   5033 	if ((sc->sc_type == WM_T_82580)
   5034 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5035 		/* clear global device reset status bit */
   5036 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5037 	}
   5038 
   5039 	/* Clear any pending interrupt events. */
   5040 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5041 	reg = CSR_READ(sc, WMREG_ICR);
   5042 	if (wm_is_using_msix(sc)) {
   5043 		if (sc->sc_type != WM_T_82574) {
   5044 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5045 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5046 		} else
   5047 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5048 	}
   5049 
   5050 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5051 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5052 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5053 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5054 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5055 		reg |= KABGTXD_BGSQLBIAS;
   5056 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5057 	}
   5058 
   5059 	/* reload sc_ctrl */
   5060 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5061 
   5062 	wm_set_eee(sc);
   5063 
   5064 	/*
   5065 	 * For PCH, this write will make sure that any noise will be detected
   5066 	 * as a CRC error and be dropped rather than show up as a bad packet
    5067 	 * to the DMA engine.
   5068 	 */
   5069 	if (sc->sc_type == WM_T_PCH)
   5070 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5071 
   5072 	if (sc->sc_type >= WM_T_82544)
   5073 		CSR_WRITE(sc, WMREG_WUC, 0);
   5074 
   5075 	if (sc->sc_type < WM_T_82575)
   5076 		wm_disable_aspm(sc); /* Workaround for some chips */
   5077 
   5078 	wm_reset_mdicnfg_82580(sc);
   5079 
   5080 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5081 		wm_pll_workaround_i210(sc);
   5082 
   5083 	if (sc->sc_type == WM_T_80003) {
   5084 		/* default to TRUE to enable the MDIC W/A */
   5085 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5086 
   5087 		rv = wm_kmrn_readreg(sc,
   5088 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5089 		if (rv == 0) {
   5090 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5091 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5092 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5093 			else
   5094 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5095 		}
   5096 	}
   5097 }
   5098 
   5099 /*
   5100  * wm_add_rxbuf:
   5101  *
    5102  *	Add a receive buffer to the indicated descriptor.
   5103  */
   5104 static int
   5105 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5106 {
   5107 	struct wm_softc *sc = rxq->rxq_sc;
   5108 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5109 	struct mbuf *m;
   5110 	int error;
   5111 
   5112 	KASSERT(mutex_owned(rxq->rxq_lock));
   5113 
   5114 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5115 	if (m == NULL)
   5116 		return ENOBUFS;
   5117 
   5118 	MCLGET(m, M_DONTWAIT);
   5119 	if ((m->m_flags & M_EXT) == 0) {
   5120 		m_freem(m);
   5121 		return ENOBUFS;
   5122 	}
   5123 
   5124 	if (rxs->rxs_mbuf != NULL)
   5125 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5126 
   5127 	rxs->rxs_mbuf = m;
   5128 
   5129 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5130 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5131 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5132 	if (error) {
   5133 		/* XXX XXX XXX */
   5134 		aprint_error_dev(sc->sc_dev,
   5135 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5136 		panic("wm_add_rxbuf");
   5137 	}
   5138 
   5139 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5140 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5141 
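	/*
	 * On NEWQUEUE devices the Rx descriptor must only be initialized
	 * after RCTL_EN is set (see wm_set_filter()), so skip it here
	 * while the receiver is disabled.
	 */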
   5142 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5143 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5144 			wm_init_rxdesc(rxq, idx);
   5145 	} else
   5146 		wm_init_rxdesc(rxq, idx);
   5147 
   5148 	return 0;
   5149 }
   5150 
   5151 /*
   5152  * wm_rxdrain:
   5153  *
   5154  *	Drain the receive queue.
   5155  */
   5156 static void
   5157 wm_rxdrain(struct wm_rxqueue *rxq)
   5158 {
   5159 	struct wm_softc *sc = rxq->rxq_sc;
   5160 	struct wm_rxsoft *rxs;
   5161 	int i;
   5162 
   5163 	KASSERT(mutex_owned(rxq->rxq_lock));
   5164 
   5165 	for (i = 0; i < WM_NRXDESC; i++) {
   5166 		rxs = &rxq->rxq_soft[i];
   5167 		if (rxs->rxs_mbuf != NULL) {
   5168 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5169 			m_freem(rxs->rxs_mbuf);
   5170 			rxs->rxs_mbuf = NULL;
   5171 		}
   5172 	}
   5173 }
   5174 
   5175 /*
    5176  * Set up the registers for RSS.
    5177  *
    5178  * XXX VMDq is not yet supported.
   5179  */
   5180 static void
   5181 wm_init_rss(struct wm_softc *sc)
   5182 {
   5183 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5184 	int i;
   5185 
   5186 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5187 
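	/*
	 * Fill the redirection table round-robin: entry i steers RSS hash
	 * bucket i to queue (i % sc_nqueues).
	 */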
   5188 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5189 		int qid, reta_ent;
   5190 
   5191 		qid  = i % sc->sc_nqueues;
   5192 		switch (sc->sc_type) {
   5193 		case WM_T_82574:
   5194 			reta_ent = __SHIFTIN(qid,
   5195 			    RETA_ENT_QINDEX_MASK_82574);
   5196 			break;
   5197 		case WM_T_82575:
   5198 			reta_ent = __SHIFTIN(qid,
   5199 			    RETA_ENT_QINDEX1_MASK_82575);
   5200 			break;
   5201 		default:
   5202 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5203 			break;
   5204 		}
   5205 
   5206 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5207 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5208 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5209 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5210 	}
   5211 
   5212 	rss_getkey((uint8_t *)rss_key);
   5213 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5214 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5215 
   5216 	if (sc->sc_type == WM_T_82574)
   5217 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5218 	else
   5219 		mrqc = MRQC_ENABLE_RSS_MQ;
   5220 
   5221 	/*
   5222 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5223 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5224 	 */
   5225 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5226 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5227 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5228 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5229 
   5230 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5231 }
   5232 
   5233 /*
    5234  * Adjust the TX and RX queue numbers which the system actually uses.
    5235  *
    5236  * The numbers are affected by the parameters below:
    5237  *     - The number of hardware queues
   5238  *     - The number of MSI-X vectors (= "nvectors" argument)
   5239  *     - ncpu
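 *
 * Example (hypothetical numbers): an 82576 (16 hardware queues) with
 * nvectors = 5 on a 4-CPU machine ends up with
 * sc_nqueues = min(16, 5 - 1, 4) = 4.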
   5240  */
   5241 static void
   5242 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5243 {
   5244 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5245 
   5246 	if (nvectors < 2) {
   5247 		sc->sc_nqueues = 1;
   5248 		return;
   5249 	}
   5250 
   5251 	switch (sc->sc_type) {
   5252 	case WM_T_82572:
   5253 		hw_ntxqueues = 2;
   5254 		hw_nrxqueues = 2;
   5255 		break;
   5256 	case WM_T_82574:
   5257 		hw_ntxqueues = 2;
   5258 		hw_nrxqueues = 2;
   5259 		break;
   5260 	case WM_T_82575:
   5261 		hw_ntxqueues = 4;
   5262 		hw_nrxqueues = 4;
   5263 		break;
   5264 	case WM_T_82576:
   5265 		hw_ntxqueues = 16;
   5266 		hw_nrxqueues = 16;
   5267 		break;
   5268 	case WM_T_82580:
   5269 	case WM_T_I350:
   5270 	case WM_T_I354:
   5271 		hw_ntxqueues = 8;
   5272 		hw_nrxqueues = 8;
   5273 		break;
   5274 	case WM_T_I210:
   5275 		hw_ntxqueues = 4;
   5276 		hw_nrxqueues = 4;
   5277 		break;
   5278 	case WM_T_I211:
   5279 		hw_ntxqueues = 2;
   5280 		hw_nrxqueues = 2;
   5281 		break;
   5282 		/*
    5283 		 * As the ethernet controllers below do not support MSI-X,
    5284 		 * this driver does not use multiqueue on them:
   5285 		 *     - WM_T_80003
   5286 		 *     - WM_T_ICH8
   5287 		 *     - WM_T_ICH9
   5288 		 *     - WM_T_ICH10
   5289 		 *     - WM_T_PCH
   5290 		 *     - WM_T_PCH2
   5291 		 *     - WM_T_PCH_LPT
   5292 		 */
   5293 	default:
   5294 		hw_ntxqueues = 1;
   5295 		hw_nrxqueues = 1;
   5296 		break;
   5297 	}
   5298 
   5299 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5300 
   5301 	/*
    5302 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5303 	 * limit the number of queues actually used.
   5304 	 */
   5305 	if (nvectors < hw_nqueues + 1)
   5306 		sc->sc_nqueues = nvectors - 1;
   5307 	else
   5308 		sc->sc_nqueues = hw_nqueues;
   5309 
   5310 	/*
    5311 	 * Since more queues than CPUs cannot improve scaling, we limit
    5312 	 * the number of queues actually used.
   5313 	 */
   5314 	if (ncpu < sc->sc_nqueues)
   5315 		sc->sc_nqueues = ncpu;
   5316 }
   5317 
   5318 static inline bool
   5319 wm_is_using_msix(struct wm_softc *sc)
   5320 {
   5321 
   5322 	return (sc->sc_nintrs > 1);
   5323 }
   5324 
   5325 static inline bool
   5326 wm_is_using_multiqueue(struct wm_softc *sc)
   5327 {
   5328 
   5329 	return (sc->sc_nqueues > 1);
   5330 }
   5331 
   5332 static int
   5333 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5334 {
   5335 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5336 	wmq->wmq_id = qidx;
   5337 	wmq->wmq_intr_idx = intr_idx;
   5338 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5339 #ifdef WM_MPSAFE
   5340 	    | SOFTINT_MPSAFE
   5341 #endif
   5342 	    , wm_handle_queue, wmq);
   5343 	if (wmq->wmq_si != NULL)
   5344 		return 0;
   5345 
   5346 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5347 	    wmq->wmq_id);
   5348 
   5349 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5350 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5351 	return ENOMEM;
   5352 }
   5353 
   5354 /*
    5355  * Both single-interrupt MSI and INTx can use this function.
   5356  */
   5357 static int
   5358 wm_setup_legacy(struct wm_softc *sc)
   5359 {
   5360 	pci_chipset_tag_t pc = sc->sc_pc;
   5361 	const char *intrstr = NULL;
   5362 	char intrbuf[PCI_INTRSTR_LEN];
   5363 	int error;
   5364 
   5365 	error = wm_alloc_txrx_queues(sc);
   5366 	if (error) {
   5367 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5368 		    error);
   5369 		return ENOMEM;
   5370 	}
   5371 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5372 	    sizeof(intrbuf));
   5373 #ifdef WM_MPSAFE
   5374 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5375 #endif
   5376 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5377 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5378 	if (sc->sc_ihs[0] == NULL) {
   5379 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5380 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5381 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5382 		return ENOMEM;
   5383 	}
   5384 
   5385 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5386 	sc->sc_nintrs = 1;
   5387 
   5388 	return wm_softint_establish(sc, 0, 0);
   5389 }
   5390 
   5391 static int
   5392 wm_setup_msix(struct wm_softc *sc)
   5393 {
   5394 	void *vih;
   5395 	kcpuset_t *affinity;
   5396 	int qidx, error, intr_idx, txrx_established;
   5397 	pci_chipset_tag_t pc = sc->sc_pc;
   5398 	const char *intrstr = NULL;
   5399 	char intrbuf[PCI_INTRSTR_LEN];
   5400 	char intr_xname[INTRDEVNAMEBUF];
   5401 
   5402 	if (sc->sc_nqueues < ncpu) {
   5403 		/*
   5404 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5405 		 * interrupts starts from CPU#1.
   5406 		 */
   5407 		sc->sc_affinity_offset = 1;
   5408 	} else {
   5409 		/*
    5410 		 * In this case, this device uses all CPUs, so we match the
    5411 		 * affinitized cpu_index to the MSI-X vector number for readability.
   5412 		 */
   5413 		sc->sc_affinity_offset = 0;
   5414 	}
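	/*
	 * For example, with four queues on an eight-CPU machine the Tx/Rx
	 * vectors end up pinned to CPU1..CPU4 (when interrupt_distribute()
	 * succeeds below) and the link vector keeps the default affinity.
	 */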
   5415 
   5416 	error = wm_alloc_txrx_queues(sc);
   5417 	if (error) {
   5418 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5419 		    error);
   5420 		return ENOMEM;
   5421 	}
   5422 
   5423 	kcpuset_create(&affinity, false);
   5424 	intr_idx = 0;
   5425 
   5426 	/*
   5427 	 * TX and RX
   5428 	 */
   5429 	txrx_established = 0;
   5430 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5431 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5432 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5433 
   5434 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5435 		    sizeof(intrbuf));
   5436 #ifdef WM_MPSAFE
   5437 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5438 		    PCI_INTR_MPSAFE, true);
   5439 #endif
   5440 		memset(intr_xname, 0, sizeof(intr_xname));
   5441 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5442 		    device_xname(sc->sc_dev), qidx);
   5443 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5444 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5445 		if (vih == NULL) {
   5446 			aprint_error_dev(sc->sc_dev,
   5447 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5448 			    intrstr ? " at " : "",
   5449 			    intrstr ? intrstr : "");
   5450 
   5451 			goto fail;
   5452 		}
   5453 		kcpuset_zero(affinity);
   5454 		/* Round-robin affinity */
   5455 		kcpuset_set(affinity, affinity_to);
   5456 		error = interrupt_distribute(vih, affinity, NULL);
   5457 		if (error == 0) {
   5458 			aprint_normal_dev(sc->sc_dev,
   5459 			    "for TX and RX interrupting at %s affinity to %u\n",
   5460 			    intrstr, affinity_to);
   5461 		} else {
   5462 			aprint_normal_dev(sc->sc_dev,
   5463 			    "for TX and RX interrupting at %s\n", intrstr);
   5464 		}
   5465 		sc->sc_ihs[intr_idx] = vih;
   5466 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5467 			goto fail;
   5468 		txrx_established++;
   5469 		intr_idx++;
   5470 	}
   5471 
   5472 	/*
   5473 	 * LINK
   5474 	 */
   5475 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5476 	    sizeof(intrbuf));
   5477 #ifdef WM_MPSAFE
   5478 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5479 #endif
   5480 	memset(intr_xname, 0, sizeof(intr_xname));
   5481 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5482 	    device_xname(sc->sc_dev));
   5483 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5484 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5485 	if (vih == NULL) {
   5486 		aprint_error_dev(sc->sc_dev,
   5487 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5488 		    intrstr ? " at " : "",
   5489 		    intrstr ? intrstr : "");
   5490 
   5491 		goto fail;
   5492 	}
    5493 	/* Keep the default affinity for the LINK interrupt */
   5494 	aprint_normal_dev(sc->sc_dev,
   5495 	    "for LINK interrupting at %s\n", intrstr);
   5496 	sc->sc_ihs[intr_idx] = vih;
   5497 	sc->sc_link_intr_idx = intr_idx;
   5498 
   5499 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5500 	kcpuset_destroy(affinity);
   5501 	return 0;
   5502 
   5503  fail:
   5504 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5505 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5506 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5507 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5508 	}
   5509 
   5510 	kcpuset_destroy(affinity);
   5511 	return ENOMEM;
   5512 }
   5513 
   5514 static void
   5515 wm_unset_stopping_flags(struct wm_softc *sc)
   5516 {
   5517 	int i;
   5518 
   5519 	KASSERT(WM_CORE_LOCKED(sc));
   5520 
   5521 	/*
   5522 	 * must unset stopping flags in ascending order.
   5523 	 */
   5524 	for (i = 0; i < sc->sc_nqueues; i++) {
   5525 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5526 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5527 
   5528 		mutex_enter(txq->txq_lock);
   5529 		txq->txq_stopping = false;
   5530 		mutex_exit(txq->txq_lock);
   5531 
   5532 		mutex_enter(rxq->rxq_lock);
   5533 		rxq->rxq_stopping = false;
   5534 		mutex_exit(rxq->rxq_lock);
   5535 	}
   5536 
   5537 	sc->sc_core_stopping = false;
   5538 }
   5539 
   5540 static void
   5541 wm_set_stopping_flags(struct wm_softc *sc)
   5542 {
   5543 	int i;
   5544 
   5545 	KASSERT(WM_CORE_LOCKED(sc));
   5546 
   5547 	sc->sc_core_stopping = true;
   5548 
   5549 	/*
   5550 	 * must set stopping flags in ascending order.
   5551 	 */
   5552 	for (i = 0; i < sc->sc_nqueues; i++) {
   5553 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5554 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5555 
   5556 		mutex_enter(rxq->rxq_lock);
   5557 		rxq->rxq_stopping = true;
   5558 		mutex_exit(rxq->rxq_lock);
   5559 
   5560 		mutex_enter(txq->txq_lock);
   5561 		txq->txq_stopping = true;
   5562 		mutex_exit(txq->txq_lock);
   5563 	}
   5564 }
   5565 
   5566 /*
    5567  * Write the interrupt interval value to the ITR or EITR register.
   5568  */
   5569 static void
   5570 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5571 {
   5572 
   5573 	if (!wmq->wmq_set_itr)
   5574 		return;
   5575 
   5576 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5577 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5578 
   5579 		/*
    5580 		 * The 82575 doesn't have the CNT_INGR field,
    5581 		 * so overwrite the counter field in software.
   5582 		 */
   5583 		if (sc->sc_type == WM_T_82575)
   5584 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5585 		else
   5586 			eitr |= EITR_CNT_INGR;
   5587 
   5588 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5589 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5590 		/*
    5591 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5592 		 * the multiqueue function with MSI-X.
   5593 		 */
   5594 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5595 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5596 	} else {
   5597 		KASSERT(wmq->wmq_id == 0);
   5598 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5599 	}
   5600 
   5601 	wmq->wmq_set_itr = false;
   5602 }
   5603 
   5604 /*
   5605  * TODO
    5606  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5607  * but it does not fit wm(4), so AIM remains disabled until we find an
    5608  * appropriate ITR calculation.
   5609  */
   5610 /*
    5611  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5612  * write. This function does not write the ITR/EITR register itself.
   5613  */
   5614 static void
   5615 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5616 {
   5617 #ifdef NOTYET
   5618 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5619 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5620 	uint32_t avg_size = 0;
   5621 	uint32_t new_itr;
   5622 
   5623 	if (rxq->rxq_packets)
   5624 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5625 	if (txq->txq_packets)
   5626 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5627 
   5628 	if (avg_size == 0) {
   5629 		new_itr = 450; /* restore default value */
   5630 		goto out;
   5631 	}
   5632 
   5633 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5634 	avg_size += 24;
   5635 
   5636 	/* Don't starve jumbo frames */
   5637 	avg_size = uimin(avg_size, 3000);
   5638 
   5639 	/* Give a little boost to mid-size frames */
   5640 	if ((avg_size > 300) && (avg_size < 1200))
   5641 		new_itr = avg_size / 3;
   5642 	else
   5643 		new_itr = avg_size / 2;
   5644 
   5645 out:
   5646 	/*
    5647 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
    5648 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
   5649 	 */
   5650 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5651 		new_itr *= 4;
   5652 
   5653 	if (new_itr != wmq->wmq_itr) {
   5654 		wmq->wmq_itr = new_itr;
   5655 		wmq->wmq_set_itr = true;
   5656 	} else
   5657 		wmq->wmq_set_itr = false;
   5658 
   5659 	rxq->rxq_packets = 0;
   5660 	rxq->rxq_bytes = 0;
   5661 	txq->txq_packets = 0;
   5662 	txq->txq_bytes = 0;
   5663 #endif
   5664 }
   5665 
   5666 /*
   5667  * wm_init:		[ifnet interface function]
   5668  *
   5669  *	Initialize the interface.
   5670  */
   5671 static int
   5672 wm_init(struct ifnet *ifp)
   5673 {
   5674 	struct wm_softc *sc = ifp->if_softc;
   5675 	int ret;
   5676 
   5677 	WM_CORE_LOCK(sc);
   5678 	ret = wm_init_locked(ifp);
   5679 	WM_CORE_UNLOCK(sc);
   5680 
   5681 	return ret;
   5682 }
   5683 
   5684 static int
   5685 wm_init_locked(struct ifnet *ifp)
   5686 {
   5687 	struct wm_softc *sc = ifp->if_softc;
   5688 	struct ethercom *ec = &sc->sc_ethercom;
   5689 	int i, j, trynum, error = 0;
   5690 	uint32_t reg;
   5691 
   5692 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5693 		device_xname(sc->sc_dev), __func__));
   5694 	KASSERT(WM_CORE_LOCKED(sc));
   5695 
   5696 	/*
    5697 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5698 	 * There is a small but measurable benefit to avoiding the adjustment
   5699 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5700 	 * on such platforms.  One possibility is that the DMA itself is
   5701 	 * slightly more efficient if the front of the entire packet (instead
   5702 	 * of the front of the headers) is aligned.
   5703 	 *
   5704 	 * Note we must always set align_tweak to 0 if we are using
   5705 	 * jumbo frames.
   5706 	 */
   5707 #ifdef __NO_STRICT_ALIGNMENT
   5708 	sc->sc_align_tweak = 0;
   5709 #else
   5710 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5711 		sc->sc_align_tweak = 0;
   5712 	else
   5713 		sc->sc_align_tweak = 2;
   5714 #endif /* __NO_STRICT_ALIGNMENT */
   5715 
   5716 	/* Cancel any pending I/O. */
   5717 	wm_stop_locked(ifp, 0);
   5718 
   5719 	/* update statistics before reset */
   5720 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5721 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5722 
   5723 	/* PCH_SPT hardware workaround */
   5724 	if (sc->sc_type == WM_T_PCH_SPT)
   5725 		wm_flush_desc_rings(sc);
   5726 
   5727 	/* Reset the chip to a known state. */
   5728 	wm_reset(sc);
   5729 
   5730 	/*
   5731 	 * AMT based hardware can now take control from firmware
   5732 	 * Do this after reset.
   5733 	 */
   5734 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5735 		wm_get_hw_control(sc);
   5736 
   5737 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5738 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5739 		wm_legacy_irq_quirk_spt(sc);
   5740 
   5741 	/* Init hardware bits */
   5742 	wm_initialize_hardware_bits(sc);
   5743 
   5744 	/* Reset the PHY. */
   5745 	if (sc->sc_flags & WM_F_HAS_MII)
   5746 		wm_gmii_reset(sc);
   5747 
   5748 	if (sc->sc_type >= WM_T_ICH8) {
   5749 		reg = CSR_READ(sc, WMREG_GCR);
   5750 		/*
    5751 		 * ICH8 no-snoop bits have the opposite polarity. Set to snoop by
   5752 		 * default after reset.
   5753 		 */
   5754 		if (sc->sc_type == WM_T_ICH8)
   5755 			reg |= GCR_NO_SNOOP_ALL;
   5756 		else
   5757 			reg &= ~GCR_NO_SNOOP_ALL;
   5758 		CSR_WRITE(sc, WMREG_GCR, reg);
   5759 	}
   5760 	if ((sc->sc_type >= WM_T_ICH8)
   5761 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5762 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5763 
   5764 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5765 		reg |= CTRL_EXT_RO_DIS;
   5766 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5767 	}
   5768 
   5769 	/* Calculate (E)ITR value */
   5770 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5771 		/*
    5772 		 * For NEWQUEUE's EITR (except for the 82575).
    5773 		 * The 82575's EITR should be set to the same throttling value
    5774 		 * as other old controllers' ITR because the interrupt/sec
    5775 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5776 		 *
    5777 		 * The 82574's EITR should be set to the same throttling value
    5778 		 * as the ITR.
    5779 		 * For N interrupts/sec, set this value to 1,000,000 / N (in
    5780 		 * contrast to the ITR throttling value).
   5781 		 */
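		/*
		 * By the 1,000,000 / N rule above, the default of 450
		 * corresponds to roughly 2222 interrupts/sec.
		 */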
   5782 		sc->sc_itr_init = 450;
   5783 	} else if (sc->sc_type >= WM_T_82543) {
   5784 		/*
   5785 		 * Set up the interrupt throttling register (units of 256ns)
   5786 		 * Note that a footnote in Intel's documentation says this
   5787 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5788 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5789 		 * that that is also true for the 1024ns units of the other
   5790 		 * interrupt-related timer registers -- so, really, we ought
   5791 		 * to divide this value by 4 when the link speed is low.
   5792 		 *
   5793 		 * XXX implement this division at link speed change!
   5794 		 */
   5795 
   5796 		/*
   5797 		 * For N interrupts/sec, set this value to:
   5798 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5799 		 * absolute and packet timer values to this value
   5800 		 * divided by 4 to get "simple timer" behavior.
   5801 		 */
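		/*
		 * Worked example: 1,000,000,000 / (1500 * 256) =~ 2604,
		 * which is where the figure below comes from.
		 */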
   5802 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5803 	}
   5804 
   5805 	error = wm_init_txrx_queues(sc);
   5806 	if (error)
   5807 		goto out;
   5808 
   5809 	/*
   5810 	 * Clear out the VLAN table -- we don't use it (yet).
   5811 	 */
   5812 	CSR_WRITE(sc, WMREG_VET, 0);
   5813 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5814 		trynum = 10; /* Due to hw errata */
   5815 	else
   5816 		trynum = 1;
   5817 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5818 		for (j = 0; j < trynum; j++)
   5819 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5820 
   5821 	/*
   5822 	 * Set up flow-control parameters.
   5823 	 *
   5824 	 * XXX Values could probably stand some tuning.
   5825 	 */
   5826 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5827 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5828 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5829 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5830 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5831 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5832 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5833 	}
   5834 
   5835 	sc->sc_fcrtl = FCRTL_DFLT;
   5836 	if (sc->sc_type < WM_T_82543) {
   5837 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5838 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5839 	} else {
   5840 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5841 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5842 	}
   5843 
   5844 	if (sc->sc_type == WM_T_80003)
   5845 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5846 	else
   5847 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5848 
   5849 	/* Writes the control register. */
   5850 	wm_set_vlan(sc);
   5851 
   5852 	if (sc->sc_flags & WM_F_HAS_MII) {
   5853 		uint16_t kmreg;
   5854 
   5855 		switch (sc->sc_type) {
   5856 		case WM_T_80003:
   5857 		case WM_T_ICH8:
   5858 		case WM_T_ICH9:
   5859 		case WM_T_ICH10:
   5860 		case WM_T_PCH:
   5861 		case WM_T_PCH2:
   5862 		case WM_T_PCH_LPT:
   5863 		case WM_T_PCH_SPT:
   5864 		case WM_T_PCH_CNP:
   5865 			/*
   5866 			 * Set the mac to wait the maximum time between each
   5867 			 * iteration and increase the max iterations when
   5868 			 * polling the phy; this fixes erroneous timeouts at
   5869 			 * 10Mbps.
   5870 			 */
   5871 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5872 			    0xFFFF);
   5873 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5874 			    &kmreg);
   5875 			kmreg |= 0x3F;
   5876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5877 			    kmreg);
   5878 			break;
   5879 		default:
   5880 			break;
   5881 		}
   5882 
   5883 		if (sc->sc_type == WM_T_80003) {
   5884 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5885 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5886 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5887 
   5888 			/* Bypass RX and TX FIFO's */
   5889 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5890 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5891 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5892 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5893 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5894 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5895 		}
   5896 	}
   5897 #if 0
   5898 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5899 #endif
   5900 
   5901 	/* Set up checksum offload parameters. */
   5902 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5903 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5904 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5905 		reg |= RXCSUM_IPOFL;
   5906 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5907 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5908 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5909 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5910 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5911 
   5912 	/* Set registers about MSI-X */
   5913 	if (wm_is_using_msix(sc)) {
   5914 		uint32_t ivar;
   5915 		struct wm_queue *wmq;
   5916 		int qid, qintr_idx;
   5917 
   5918 		if (sc->sc_type == WM_T_82575) {
   5919 			/* Interrupt control */
   5920 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5921 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5922 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5923 
   5924 			/* TX and RX */
   5925 			for (i = 0; i < sc->sc_nqueues; i++) {
   5926 				wmq = &sc->sc_queue[i];
   5927 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5928 				    EITR_TX_QUEUE(wmq->wmq_id)
   5929 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5930 			}
   5931 			/* Link status */
   5932 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5933 			    EITR_OTHER);
   5934 		} else if (sc->sc_type == WM_T_82574) {
   5935 			/* Interrupt control */
   5936 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5937 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5938 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5939 
   5940 			/*
    5941 			 * Work around an issue with spurious interrupts
    5942 			 * in MSI-X mode.
    5943 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5944 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5945 			 */
   5946 			reg = CSR_READ(sc, WMREG_RFCTL);
   5947 			reg |= WMREG_RFCTL_ACKDIS;
   5948 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5949 
   5950 			ivar = 0;
   5951 			/* TX and RX */
   5952 			for (i = 0; i < sc->sc_nqueues; i++) {
   5953 				wmq = &sc->sc_queue[i];
   5954 				qid = wmq->wmq_id;
   5955 				qintr_idx = wmq->wmq_intr_idx;
   5956 
   5957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5958 				    IVAR_TX_MASK_Q_82574(qid));
   5959 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5960 				    IVAR_RX_MASK_Q_82574(qid));
   5961 			}
   5962 			/* Link status */
   5963 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5964 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5965 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5966 		} else {
   5967 			/* Interrupt control */
   5968 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5969 			    | GPIE_EIAME | GPIE_PBA);
   5970 
   5971 			switch (sc->sc_type) {
   5972 			case WM_T_82580:
   5973 			case WM_T_I350:
   5974 			case WM_T_I354:
   5975 			case WM_T_I210:
   5976 			case WM_T_I211:
   5977 				/* TX and RX */
   5978 				for (i = 0; i < sc->sc_nqueues; i++) {
   5979 					wmq = &sc->sc_queue[i];
   5980 					qid = wmq->wmq_id;
   5981 					qintr_idx = wmq->wmq_intr_idx;
   5982 
   5983 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5984 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5985 					ivar |= __SHIFTIN((qintr_idx
   5986 						| IVAR_VALID),
   5987 					    IVAR_TX_MASK_Q(qid));
   5988 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5989 					ivar |= __SHIFTIN((qintr_idx
   5990 						| IVAR_VALID),
   5991 					    IVAR_RX_MASK_Q(qid));
   5992 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5993 				}
   5994 				break;
   5995 			case WM_T_82576:
   5996 				/* TX and RX */
   5997 				for (i = 0; i < sc->sc_nqueues; i++) {
   5998 					wmq = &sc->sc_queue[i];
   5999 					qid = wmq->wmq_id;
   6000 					qintr_idx = wmq->wmq_intr_idx;
   6001 
   6002 					ivar = CSR_READ(sc,
   6003 					    WMREG_IVAR_Q_82576(qid));
   6004 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6005 					ivar |= __SHIFTIN((qintr_idx
   6006 						| IVAR_VALID),
   6007 					    IVAR_TX_MASK_Q_82576(qid));
   6008 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6009 					ivar |= __SHIFTIN((qintr_idx
   6010 						| IVAR_VALID),
   6011 					    IVAR_RX_MASK_Q_82576(qid));
   6012 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6013 					    ivar);
   6014 				}
   6015 				break;
   6016 			default:
   6017 				break;
   6018 			}
   6019 
   6020 			/* Link status */
   6021 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6022 			    IVAR_MISC_OTHER);
   6023 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6024 		}
   6025 
   6026 		if (wm_is_using_multiqueue(sc)) {
   6027 			wm_init_rss(sc);
   6028 
    6029 			/*
    6030 			 * NOTE: Receive Full-Packet Checksum Offload is
    6031 			 * mutually exclusive with Multiqueue. However, this
    6032 			 * is not the same as the TCP/IP checksums, which
    6033 			 * still work.
    6034 			 */
   6035 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6036 			reg |= RXCSUM_PCSD;
   6037 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6038 		}
   6039 	}
   6040 
   6041 	/* Set up the interrupt registers. */
   6042 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6043 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6044 	    ICR_RXO | ICR_RXT0;
   6045 	if (wm_is_using_msix(sc)) {
   6046 		uint32_t mask;
   6047 		struct wm_queue *wmq;
   6048 
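		/*
		 * Unmask the per-queue and link vectors.  The 82574 encodes
		 * queue interrupts as ICR_TXQ/ICR_RXQ bits, while the other
		 * MSI-X capable parts use the EIAC/EIAM/EIMS registers.
		 */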
   6049 		switch (sc->sc_type) {
   6050 		case WM_T_82574:
   6051 			mask = 0;
   6052 			for (i = 0; i < sc->sc_nqueues; i++) {
   6053 				wmq = &sc->sc_queue[i];
   6054 				mask |= ICR_TXQ(wmq->wmq_id);
   6055 				mask |= ICR_RXQ(wmq->wmq_id);
   6056 			}
   6057 			mask |= ICR_OTHER;
   6058 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6059 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6060 			break;
   6061 		default:
   6062 			if (sc->sc_type == WM_T_82575) {
   6063 				mask = 0;
   6064 				for (i = 0; i < sc->sc_nqueues; i++) {
   6065 					wmq = &sc->sc_queue[i];
   6066 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6067 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6068 				}
   6069 				mask |= EITR_OTHER;
   6070 			} else {
   6071 				mask = 0;
   6072 				for (i = 0; i < sc->sc_nqueues; i++) {
   6073 					wmq = &sc->sc_queue[i];
   6074 					mask |= 1 << wmq->wmq_intr_idx;
   6075 				}
   6076 				mask |= 1 << sc->sc_link_intr_idx;
   6077 			}
   6078 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6079 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6080 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6081 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6082 			break;
   6083 		}
   6084 	} else
   6085 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6086 
   6087 	/* Set up the inter-packet gap. */
   6088 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6089 
   6090 	if (sc->sc_type >= WM_T_82543) {
   6091 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6092 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6093 			wm_itrs_writereg(sc, wmq);
   6094 		}
   6095 		/*
    6096 		 * Link interrupts occur much less often than TX
    6097 		 * and RX interrupts, so we don't tune the
    6098 		 * EITR(WM_MSIX_LINKINTR_IDX) value like
    6099 		 * FreeBSD's if_igb does.
   6100 		 */
   6101 	}
   6102 
   6103 	/* Set the VLAN ethernetype. */
   6104 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6105 
   6106 	/*
   6107 	 * Set up the transmit control register; we start out with
    6108 	 * a collision distance suitable for FDX, but update it when
   6109 	 * we resolve the media type.
   6110 	 */
   6111 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6112 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6113 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6114 	if (sc->sc_type >= WM_T_82571)
   6115 		sc->sc_tctl |= TCTL_MULR;
   6116 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6117 
   6118 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6119 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6120 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6121 	}
   6122 
   6123 	if (sc->sc_type == WM_T_80003) {
   6124 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6125 		reg &= ~TCTL_EXT_GCEX_MASK;
   6126 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6127 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6128 	}
   6129 
   6130 	/* Set the media. */
   6131 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6132 		goto out;
   6133 
   6134 	/* Configure for OS presence */
   6135 	wm_init_manageability(sc);
   6136 
   6137 	/*
   6138 	 * Set up the receive control register; we actually program the
   6139 	 * register when we set the receive filter. Use multicast address
   6140 	 * offset type 0.
   6141 	 *
   6142 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6143 	 * don't enable that feature.
   6144 	 */
   6145 	sc->sc_mchash_type = 0;
   6146 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6147 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6148 
   6149 	/*
    6150 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6151 	 */
   6152 	if (sc->sc_type == WM_T_82574)
   6153 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6154 
   6155 	/*
   6156 	 * The I350 has a bug where it always strips the CRC whether
    6157 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6158 	 */
   6159 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6160 	    || (sc->sc_type == WM_T_I210))
   6161 		sc->sc_rctl |= RCTL_SECRC;
   6162 
   6163 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6164 	    && (ifp->if_mtu > ETHERMTU)) {
   6165 		sc->sc_rctl |= RCTL_LPE;
   6166 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6167 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6168 	}
   6169 
   6170 	if (MCLBYTES == 2048)
   6171 		sc->sc_rctl |= RCTL_2k;
   6172 	else {
   6173 		if (sc->sc_type >= WM_T_82543) {
   6174 			switch (MCLBYTES) {
   6175 			case 4096:
   6176 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6177 				break;
   6178 			case 8192:
   6179 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6180 				break;
   6181 			case 16384:
   6182 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6183 				break;
   6184 			default:
   6185 				panic("wm_init: MCLBYTES %d unsupported",
   6186 				    MCLBYTES);
   6187 				break;
   6188 			}
   6189 		} else
   6190 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6191 	}
   6192 
   6193 	/* Enable ECC */
   6194 	switch (sc->sc_type) {
   6195 	case WM_T_82571:
   6196 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6197 		reg |= PBA_ECC_CORR_EN;
   6198 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6199 		break;
   6200 	case WM_T_PCH_LPT:
   6201 	case WM_T_PCH_SPT:
   6202 	case WM_T_PCH_CNP:
   6203 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6204 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6205 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6206 
   6207 		sc->sc_ctrl |= CTRL_MEHE;
   6208 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6209 		break;
   6210 	default:
   6211 		break;
   6212 	}
   6213 
   6214 	/*
   6215 	 * Set the receive filter.
   6216 	 *
   6217 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6218 	 * the setting of RCTL.EN in wm_set_filter()
   6219 	 */
   6220 	wm_set_filter(sc);
   6221 
    6222 	/* On 82575 and later, set RDT only if RX is enabled. */
   6223 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6224 		int qidx;
   6225 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6226 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6227 			for (i = 0; i < WM_NRXDESC; i++) {
   6228 				mutex_enter(rxq->rxq_lock);
   6229 				wm_init_rxdesc(rxq, i);
   6230 				mutex_exit(rxq->rxq_lock);
    6232 			}
   6233 		}
   6234 	}
   6235 
   6236 	wm_unset_stopping_flags(sc);
   6237 
   6238 	/* Start the one second link check clock. */
   6239 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6240 
   6241 	/* ...all done! */
   6242 	ifp->if_flags |= IFF_RUNNING;
   6243 	ifp->if_flags &= ~IFF_OACTIVE;
   6244 
   6245  out:
   6246 	/* Save last flags for the callback */
   6247 	sc->sc_if_flags = ifp->if_flags;
   6248 	sc->sc_ec_capenable = ec->ec_capenable;
   6249 	if (error)
   6250 		log(LOG_ERR, "%s: interface not running\n",
   6251 		    device_xname(sc->sc_dev));
   6252 	return error;
   6253 }
   6254 
   6255 /*
   6256  * wm_stop:		[ifnet interface function]
   6257  *
   6258  *	Stop transmission on the interface.
   6259  */
   6260 static void
   6261 wm_stop(struct ifnet *ifp, int disable)
   6262 {
   6263 	struct wm_softc *sc = ifp->if_softc;
   6264 
   6265 	WM_CORE_LOCK(sc);
   6266 	wm_stop_locked(ifp, disable);
   6267 	WM_CORE_UNLOCK(sc);
   6268 }
   6269 
   6270 static void
   6271 wm_stop_locked(struct ifnet *ifp, int disable)
   6272 {
   6273 	struct wm_softc *sc = ifp->if_softc;
   6274 	struct wm_txsoft *txs;
   6275 	int i, qidx;
   6276 
   6277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6278 		device_xname(sc->sc_dev), __func__));
   6279 	KASSERT(WM_CORE_LOCKED(sc));
   6280 
   6281 	wm_set_stopping_flags(sc);
   6282 
   6283 	/* Stop the one second clock. */
   6284 	callout_stop(&sc->sc_tick_ch);
   6285 
   6286 	/* Stop the 82547 Tx FIFO stall check timer. */
   6287 	if (sc->sc_type == WM_T_82547)
   6288 		callout_stop(&sc->sc_txfifo_ch);
   6289 
   6290 	if (sc->sc_flags & WM_F_HAS_MII) {
   6291 		/* Down the MII. */
   6292 		mii_down(&sc->sc_mii);
   6293 	} else {
   6294 #if 0
   6295 		/* Should we clear PHY's status properly? */
   6296 		wm_reset(sc);
   6297 #endif
   6298 	}
   6299 
   6300 	/* Stop the transmit and receive processes. */
   6301 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6302 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6303 	sc->sc_rctl &= ~RCTL_EN;
   6304 
   6305 	/*
   6306 	 * Clear the interrupt mask to ensure the device cannot assert its
   6307 	 * interrupt line.
   6308 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6309 	 * service any currently pending or shared interrupt.
   6310 	 */
   6311 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6312 	sc->sc_icr = 0;
   6313 	if (wm_is_using_msix(sc)) {
   6314 		if (sc->sc_type != WM_T_82574) {
   6315 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6316 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6317 		} else
   6318 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6319 	}
   6320 
   6321 	/* Release any queued transmit buffers. */
   6322 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6323 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6324 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6325 		mutex_enter(txq->txq_lock);
   6326 		txq->txq_sending = false; /* ensure watchdog disabled */
   6327 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6328 			txs = &txq->txq_soft[i];
   6329 			if (txs->txs_mbuf != NULL) {
   6330 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6331 				m_freem(txs->txs_mbuf);
   6332 				txs->txs_mbuf = NULL;
   6333 			}
   6334 		}
   6335 		mutex_exit(txq->txq_lock);
   6336 	}
   6337 
   6338 	/* Mark the interface as down and cancel the watchdog timer. */
   6339 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6340 
   6341 	if (disable) {
   6342 		for (i = 0; i < sc->sc_nqueues; i++) {
   6343 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6344 			mutex_enter(rxq->rxq_lock);
   6345 			wm_rxdrain(rxq);
   6346 			mutex_exit(rxq->rxq_lock);
   6347 		}
   6348 	}
   6349 
   6350 #if 0 /* notyet */
   6351 	if (sc->sc_type >= WM_T_82544)
   6352 		CSR_WRITE(sc, WMREG_WUC, 0);
   6353 #endif
   6354 }
   6355 
   6356 static void
   6357 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6358 {
   6359 	struct mbuf *m;
   6360 	int i;
   6361 
   6362 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6363 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6364 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6365 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6366 		    m->m_data, m->m_len, m->m_flags);
   6367 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6368 	    i, i == 1 ? "" : "s");
   6369 }
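         
         /*
          * Example of the log output produced above, with illustrative values
          * (a two-mbuf chain on a device named wm0):
          *
          *	wm0: mbuf chain:
          *	wm0:	m_data = 0xffff800012345000, m_len = 54, m_flags = 0x00000003
          *	wm0:	m_data = 0xffff800012346000, m_len = 1460, m_flags = 0x00000001
          *	wm0:	2 mbufs in chain
          */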
   6370 
   6371 /*
   6372  * wm_82547_txfifo_stall:
   6373  *
   6374  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6375  *	reset the FIFO pointers, and restart packet transmission.
   6376  */
   6377 static void
   6378 wm_82547_txfifo_stall(void *arg)
   6379 {
   6380 	struct wm_softc *sc = arg;
   6381 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6382 
   6383 	mutex_enter(txq->txq_lock);
   6384 
   6385 	if (txq->txq_stopping)
   6386 		goto out;
   6387 
   6388 	if (txq->txq_fifo_stall) {
   6389 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6390 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6391 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6392 			/*
   6393 			 * Packets have drained.  Stop transmitter, reset
   6394 			 * FIFO pointers, restart transmitter, and kick
   6395 			 * the packet queue.
   6396 			 */
   6397 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6398 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6399 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6400 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6401 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6402 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6403 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6404 			CSR_WRITE_FLUSH(sc);
   6405 
   6406 			txq->txq_fifo_head = 0;
   6407 			txq->txq_fifo_stall = 0;
   6408 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6409 		} else {
   6410 			/*
   6411 			 * Still waiting for packets to drain; try again in
   6412 			 * another tick.
   6413 			 */
   6414 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6415 		}
   6416 	}
   6417 
   6418 out:
   6419 	mutex_exit(txq->txq_lock);
   6420 }
   6421 
   6422 /*
   6423  * wm_82547_txfifo_bugchk:
   6424  *
   6425  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6426  *	prevent enqueueing a packet that would wrap around the end
    6427  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6428  *
   6429  *	We do this by checking the amount of space before the end
   6430  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6431  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6432  *	the internal FIFO pointers to the beginning, and restart
   6433  *	transmission on the interface.
   6434  */
   6435 #define	WM_FIFO_HDR		0x10
   6436 #define	WM_82547_PAD_LEN	0x3e0
   6437 static int
   6438 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6439 {
   6440 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6441 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6442 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6443 
   6444 	/* Just return if already stalled. */
   6445 	if (txq->txq_fifo_stall)
   6446 		return 1;
   6447 
   6448 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6449 		/* Stall only occurs in half-duplex mode. */
   6450 		goto send_packet;
   6451 	}
   6452 
   6453 	if (len >= WM_82547_PAD_LEN + space) {
   6454 		txq->txq_fifo_stall = 1;
   6455 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6456 		return 1;
   6457 	}
   6458 
   6459  send_packet:
   6460 	txq->txq_fifo_head += len;
   6461 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6462 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6463 
   6464 	return 0;
   6465 }
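         
         /*
          * A worked example of the accounting above, with illustrative values:
          * a 1514-byte frame consumes roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR)
          * = 0x600 bytes of FIFO space.  If txq_fifo_size is 0x2000 and
          * txq_fifo_head is 0x1f00, only 0x100 bytes remain before the wrap;
          * since 0x600 >= WM_82547_PAD_LEN + 0x100, the queue is stalled until
          * the FIFO drains and wm_82547_txfifo_stall() resets the pointers.
          */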
   6466 
   6467 static int
   6468 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6469 {
   6470 	int error;
   6471 
   6472 	/*
   6473 	 * Allocate the control data structures, and create and load the
   6474 	 * DMA map for it.
   6475 	 *
   6476 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6477 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6478 	 * both sets within the same 4G segment.
   6479 	 */
   6480 	if (sc->sc_type < WM_T_82544)
   6481 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6482 	else
   6483 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6484 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6485 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6486 	else
   6487 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6488 
   6489 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6490 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6491 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6492 		aprint_error_dev(sc->sc_dev,
   6493 		    "unable to allocate TX control data, error = %d\n",
   6494 		    error);
   6495 		goto fail_0;
   6496 	}
   6497 
   6498 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6499 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6500 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6501 		aprint_error_dev(sc->sc_dev,
   6502 		    "unable to map TX control data, error = %d\n", error);
   6503 		goto fail_1;
   6504 	}
   6505 
   6506 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6507 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6508 		aprint_error_dev(sc->sc_dev,
   6509 		    "unable to create TX control data DMA map, error = %d\n",
   6510 		    error);
   6511 		goto fail_2;
   6512 	}
   6513 
   6514 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6515 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6516 		aprint_error_dev(sc->sc_dev,
   6517 		    "unable to load TX control data DMA map, error = %d\n",
   6518 		    error);
   6519 		goto fail_3;
   6520 	}
   6521 
   6522 	return 0;
   6523 
   6524  fail_3:
   6525 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6526  fail_2:
   6527 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6528 	    WM_TXDESCS_SIZE(txq));
   6529  fail_1:
   6530 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6531  fail_0:
   6532 	return error;
   6533 }
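         
         /*
          * Note on the 4G requirement above: the bus_dmamem_alloc() calls in
          * this function and in wm_alloc_rx_descs() pass 0x100000000ULL as the
          * bus_dma(9) "boundary" argument, so the returned descriptor memory
          * never crosses a 4GB boundary; that is what keeps each descriptor
          * ring within a single 4G segment.
          */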
   6534 
   6535 static void
   6536 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6537 {
   6538 
   6539 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6540 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6541 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6542 	    WM_TXDESCS_SIZE(txq));
   6543 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6544 }
   6545 
   6546 static int
   6547 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6548 {
   6549 	int error;
   6550 	size_t rxq_descs_size;
   6551 
   6552 	/*
   6553 	 * Allocate the control data structures, and create and load the
   6554 	 * DMA map for it.
   6555 	 *
   6556 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6557 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6558 	 * both sets within the same 4G segment.
   6559 	 */
   6560 	rxq->rxq_ndesc = WM_NRXDESC;
   6561 	if (sc->sc_type == WM_T_82574)
   6562 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6563 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6564 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6565 	else
   6566 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6567 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6568 
   6569 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6570 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6571 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6572 		aprint_error_dev(sc->sc_dev,
   6573 		    "unable to allocate RX control data, error = %d\n",
   6574 		    error);
   6575 		goto fail_0;
   6576 	}
   6577 
   6578 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6579 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6580 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6581 		aprint_error_dev(sc->sc_dev,
   6582 		    "unable to map RX control data, error = %d\n", error);
   6583 		goto fail_1;
   6584 	}
   6585 
   6586 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6587 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6588 		aprint_error_dev(sc->sc_dev,
   6589 		    "unable to create RX control data DMA map, error = %d\n",
   6590 		    error);
   6591 		goto fail_2;
   6592 	}
   6593 
   6594 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6595 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6596 		aprint_error_dev(sc->sc_dev,
   6597 		    "unable to load RX control data DMA map, error = %d\n",
   6598 		    error);
   6599 		goto fail_3;
   6600 	}
   6601 
   6602 	return 0;
   6603 
   6604  fail_3:
   6605 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6606  fail_2:
   6607 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6608 	    rxq_descs_size);
   6609  fail_1:
   6610 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6611  fail_0:
   6612 	return error;
   6613 }
   6614 
   6615 static void
   6616 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6617 {
   6618 
   6619 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6620 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6621 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6622 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6623 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6624 }
   6625 
   6626 
   6627 static int
   6628 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6629 {
   6630 	int i, error;
   6631 
   6632 	/* Create the transmit buffer DMA maps. */
   6633 	WM_TXQUEUELEN(txq) =
   6634 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6635 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6636 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6637 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6638 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6639 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6640 			aprint_error_dev(sc->sc_dev,
   6641 			    "unable to create Tx DMA map %d, error = %d\n",
   6642 			    i, error);
   6643 			goto fail;
   6644 		}
   6645 	}
   6646 
   6647 	return 0;
   6648 
   6649  fail:
   6650 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6651 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6652 			bus_dmamap_destroy(sc->sc_dmat,
   6653 			    txq->txq_soft[i].txs_dmamap);
   6654 	}
   6655 	return error;
   6656 }
   6657 
   6658 static void
   6659 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6660 {
   6661 	int i;
   6662 
   6663 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6664 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6665 			bus_dmamap_destroy(sc->sc_dmat,
   6666 			    txq->txq_soft[i].txs_dmamap);
   6667 	}
   6668 }
   6669 
   6670 static int
   6671 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6672 {
   6673 	int i, error;
   6674 
   6675 	/* Create the receive buffer DMA maps. */
   6676 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6677 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6678 			    MCLBYTES, 0, 0,
   6679 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6680 			aprint_error_dev(sc->sc_dev,
   6681 			    "unable to create Rx DMA map %d error = %d\n",
   6682 			    i, error);
   6683 			goto fail;
   6684 		}
   6685 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6686 	}
   6687 
   6688 	return 0;
   6689 
   6690  fail:
   6691 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6692 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6693 			bus_dmamap_destroy(sc->sc_dmat,
   6694 			    rxq->rxq_soft[i].rxs_dmamap);
   6695 	}
   6696 	return error;
   6697 }
   6698 
   6699 static void
   6700 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6701 {
   6702 	int i;
   6703 
   6704 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6705 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6706 			bus_dmamap_destroy(sc->sc_dmat,
   6707 			    rxq->rxq_soft[i].rxs_dmamap);
   6708 	}
   6709 }
   6710 
   6711 /*
    6712  * wm_alloc_txrx_queues:
   6713  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6714  */
   6715 static int
   6716 wm_alloc_txrx_queues(struct wm_softc *sc)
   6717 {
   6718 	int i, error, tx_done, rx_done;
   6719 
   6720 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6721 	    KM_SLEEP);
   6722 	if (sc->sc_queue == NULL) {
    6723 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6724 		error = ENOMEM;
   6725 		goto fail_0;
   6726 	}
   6727 
   6728 	/*
   6729 	 * For transmission
   6730 	 */
   6731 	error = 0;
   6732 	tx_done = 0;
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 #ifdef WM_EVENT_COUNTERS
   6735 		int j;
   6736 		const char *xname;
   6737 #endif
   6738 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6739 		txq->txq_sc = sc;
   6740 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6741 
   6742 		error = wm_alloc_tx_descs(sc, txq);
   6743 		if (error)
   6744 			break;
   6745 		error = wm_alloc_tx_buffer(sc, txq);
   6746 		if (error) {
   6747 			wm_free_tx_descs(sc, txq);
   6748 			break;
   6749 		}
   6750 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6751 		if (txq->txq_interq == NULL) {
   6752 			wm_free_tx_descs(sc, txq);
   6753 			wm_free_tx_buffer(sc, txq);
   6754 			error = ENOMEM;
   6755 			break;
   6756 		}
   6757 
   6758 #ifdef WM_EVENT_COUNTERS
   6759 		xname = device_xname(sc->sc_dev);
   6760 
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6764 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6772 
   6773 		for (j = 0; j < WM_NTXSEGS; j++) {
   6774 			snprintf(txq->txq_txseg_evcnt_names[j],
   6775 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6776 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6777 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6778 		}
   6779 
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6785 #endif /* WM_EVENT_COUNTERS */
   6786 
   6787 		tx_done++;
   6788 	}
   6789 	if (error)
   6790 		goto fail_1;
   6791 
   6792 	/*
    6793 	 * For receive
   6794 	 */
   6795 	error = 0;
   6796 	rx_done = 0;
   6797 	for (i = 0; i < sc->sc_nqueues; i++) {
   6798 #ifdef WM_EVENT_COUNTERS
   6799 		const char *xname;
   6800 #endif
   6801 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6802 		rxq->rxq_sc = sc;
   6803 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6804 
   6805 		error = wm_alloc_rx_descs(sc, rxq);
   6806 		if (error)
   6807 			break;
   6808 
   6809 		error = wm_alloc_rx_buffer(sc, rxq);
   6810 		if (error) {
   6811 			wm_free_rx_descs(sc, rxq);
   6812 			break;
   6813 		}
   6814 
   6815 #ifdef WM_EVENT_COUNTERS
   6816 		xname = device_xname(sc->sc_dev);
   6817 
   6818 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6819 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6820 
   6821 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6822 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6823 #endif /* WM_EVENT_COUNTERS */
   6824 
   6825 		rx_done++;
   6826 	}
   6827 	if (error)
   6828 		goto fail_2;
   6829 
   6830 	return 0;
   6831 
   6832  fail_2:
   6833 	for (i = 0; i < rx_done; i++) {
   6834 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6835 		wm_free_rx_buffer(sc, rxq);
   6836 		wm_free_rx_descs(sc, rxq);
   6837 		if (rxq->rxq_lock)
   6838 			mutex_obj_free(rxq->rxq_lock);
   6839 	}
   6840  fail_1:
   6841 	for (i = 0; i < tx_done; i++) {
   6842 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6843 		pcq_destroy(txq->txq_interq);
   6844 		wm_free_tx_buffer(sc, txq);
   6845 		wm_free_tx_descs(sc, txq);
   6846 		if (txq->txq_lock)
   6847 			mutex_obj_free(txq->txq_lock);
   6848 	}
   6849 
   6850 	kmem_free(sc->sc_queue,
   6851 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6852  fail_0:
   6853 	return error;
   6854 }
   6855 
   6856 /*
    6857  * wm_free_txrx_queues:
   6858  *	Free {tx,rx}descs and {tx,rx} buffers
   6859  */
   6860 static void
   6861 wm_free_txrx_queues(struct wm_softc *sc)
   6862 {
   6863 	int i;
   6864 
   6865 	for (i = 0; i < sc->sc_nqueues; i++) {
   6866 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6867 
   6868 #ifdef WM_EVENT_COUNTERS
   6869 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6870 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6871 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6872 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6873 #endif /* WM_EVENT_COUNTERS */
   6874 
   6875 		wm_free_rx_buffer(sc, rxq);
   6876 		wm_free_rx_descs(sc, rxq);
   6877 		if (rxq->rxq_lock)
   6878 			mutex_obj_free(rxq->rxq_lock);
   6879 	}
   6880 
   6881 	for (i = 0; i < sc->sc_nqueues; i++) {
   6882 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6883 		struct mbuf *m;
   6884 #ifdef WM_EVENT_COUNTERS
   6885 		int j;
   6886 
   6887 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6895 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6896 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6897 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6898 
   6899 		for (j = 0; j < WM_NTXSEGS; j++)
   6900 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6901 
   6902 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6907 #endif /* WM_EVENT_COUNTERS */
   6908 
   6909 		/* drain txq_interq */
   6910 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6911 			m_freem(m);
   6912 		pcq_destroy(txq->txq_interq);
   6913 
   6914 		wm_free_tx_buffer(sc, txq);
   6915 		wm_free_tx_descs(sc, txq);
   6916 		if (txq->txq_lock)
   6917 			mutex_obj_free(txq->txq_lock);
   6918 	}
   6919 
   6920 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6921 }
   6922 
   6923 static void
   6924 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6925 {
   6926 
   6927 	KASSERT(mutex_owned(txq->txq_lock));
   6928 
   6929 	/* Initialize the transmit descriptor ring. */
   6930 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6931 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6932 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6933 	txq->txq_free = WM_NTXDESC(txq);
   6934 	txq->txq_next = 0;
   6935 }
   6936 
   6937 static void
   6938 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6939     struct wm_txqueue *txq)
   6940 {
   6941 
   6942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6943 		device_xname(sc->sc_dev), __func__));
   6944 	KASSERT(mutex_owned(txq->txq_lock));
   6945 
   6946 	if (sc->sc_type < WM_T_82543) {
   6947 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6948 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6949 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6950 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6951 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6952 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6953 	} else {
   6954 		int qid = wmq->wmq_id;
   6955 
   6956 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6957 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6958 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6959 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6960 
   6961 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6962 			/*
   6963 			 * Don't write TDT before TCTL.EN is set.
    6964 			 * See the datasheet.
   6965 			 */
   6966 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6967 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6968 			    | TXDCTL_WTHRESH(0));
   6969 		else {
   6970 			/* XXX should update with AIM? */
   6971 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6972 			if (sc->sc_type >= WM_T_82540) {
    6973 				/* Should be same as TIDV */
   6974 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6975 			}
   6976 
   6977 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6978 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6979 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6980 		}
   6981 	}
   6982 }
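         
         /*
          * For reference on the TXDCTL fields written above, as described in
          * Intel's documentation for this register family: PTHRESH is the
          * prefetch threshold, HTHRESH the host threshold, and WTHRESH the
          * descriptor write-back threshold; WTHRESH(0) avoids batching
          * descriptor write-backs.
          */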
   6983 
   6984 static void
   6985 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6986 {
   6987 	int i;
   6988 
   6989 	KASSERT(mutex_owned(txq->txq_lock));
   6990 
   6991 	/* Initialize the transmit job descriptors. */
   6992 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6993 		txq->txq_soft[i].txs_mbuf = NULL;
   6994 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6995 	txq->txq_snext = 0;
   6996 	txq->txq_sdirty = 0;
   6997 }
   6998 
   6999 static void
   7000 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7001     struct wm_txqueue *txq)
   7002 {
   7003 
   7004 	KASSERT(mutex_owned(txq->txq_lock));
   7005 
   7006 	/*
   7007 	 * Set up some register offsets that are different between
   7008 	 * the i82542 and the i82543 and later chips.
   7009 	 */
   7010 	if (sc->sc_type < WM_T_82543)
   7011 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7012 	else
   7013 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7014 
   7015 	wm_init_tx_descs(sc, txq);
   7016 	wm_init_tx_regs(sc, wmq, txq);
   7017 	wm_init_tx_buffer(sc, txq);
   7018 
   7019 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7020 	txq->txq_sending = false;
   7021 }
   7022 
   7023 static void
   7024 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7025     struct wm_rxqueue *rxq)
   7026 {
   7027 
   7028 	KASSERT(mutex_owned(rxq->rxq_lock));
   7029 
   7030 	/*
   7031 	 * Initialize the receive descriptor and receive job
   7032 	 * descriptor rings.
   7033 	 */
   7034 	if (sc->sc_type < WM_T_82543) {
   7035 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7036 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7037 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7038 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7042 
   7043 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7044 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7045 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7046 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7047 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7048 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7049 	} else {
   7050 		int qid = wmq->wmq_id;
   7051 
   7052 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7053 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7054 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7055 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7056 
   7057 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7058 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7059 				panic("%s: MCLBYTES %d unsupported for 82575 "
    7060 				    "or higher\n", __func__, MCLBYTES);
    7061 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
    7062 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
    7063 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7064 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7065 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7066 			    | RXDCTL_WTHRESH(1));
   7067 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7068 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7069 		} else {
   7070 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7071 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7072 			/* XXX should update with AIM? */
   7073 			CSR_WRITE(sc, WMREG_RDTR,
   7074 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7075 			/* MUST be same as RDTR */
   7076 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7077 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7078 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7079 		}
   7080 	}
   7081 }
   7082 
   7083 static int
   7084 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7085 {
   7086 	struct wm_rxsoft *rxs;
   7087 	int error, i;
   7088 
   7089 	KASSERT(mutex_owned(rxq->rxq_lock));
   7090 
   7091 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7092 		rxs = &rxq->rxq_soft[i];
   7093 		if (rxs->rxs_mbuf == NULL) {
   7094 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7095 				log(LOG_ERR, "%s: unable to allocate or map "
   7096 				    "rx buffer %d, error = %d\n",
   7097 				    device_xname(sc->sc_dev), i, error);
   7098 				/*
   7099 				 * XXX Should attempt to run with fewer receive
   7100 				 * XXX buffers instead of just failing.
   7101 				 */
   7102 				wm_rxdrain(rxq);
   7103 				return ENOMEM;
   7104 			}
   7105 		} else {
   7106 			/*
   7107 			 * For 82575 and 82576, the RX descriptors must be
   7108 			 * initialized after the setting of RCTL.EN in
   7109 			 * wm_set_filter()
   7110 			 */
   7111 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7112 				wm_init_rxdesc(rxq, i);
   7113 		}
   7114 	}
   7115 	rxq->rxq_ptr = 0;
   7116 	rxq->rxq_discard = 0;
   7117 	WM_RXCHAIN_RESET(rxq);
   7118 
   7119 	return 0;
   7120 }
   7121 
   7122 static int
   7123 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7124     struct wm_rxqueue *rxq)
   7125 {
   7126 
   7127 	KASSERT(mutex_owned(rxq->rxq_lock));
   7128 
   7129 	/*
   7130 	 * Set up some register offsets that are different between
   7131 	 * the i82542 and the i82543 and later chips.
   7132 	 */
   7133 	if (sc->sc_type < WM_T_82543)
   7134 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7135 	else
   7136 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7137 
   7138 	wm_init_rx_regs(sc, wmq, rxq);
   7139 	return wm_init_rx_buffer(sc, rxq);
   7140 }
   7141 
   7142 /*
    7143  * wm_init_txrx_queues:
   7144  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7145  */
   7146 static int
   7147 wm_init_txrx_queues(struct wm_softc *sc)
   7148 {
   7149 	int i, error = 0;
   7150 
   7151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7152 		device_xname(sc->sc_dev), __func__));
   7153 
   7154 	for (i = 0; i < sc->sc_nqueues; i++) {
   7155 		struct wm_queue *wmq = &sc->sc_queue[i];
   7156 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7157 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7158 
   7159 		/*
   7160 		 * TODO
    7161 		 * Currently, a constant value is used instead of AIM.
    7162 		 * Furthermore, the interrupt interval of multiqueue, which
    7163 		 * uses polling mode, is shorter than the default value.
    7164 		 * More tuning and AIM support are required.
   7165 		 */
   7166 		if (wm_is_using_multiqueue(sc))
   7167 			wmq->wmq_itr = 50;
   7168 		else
   7169 			wmq->wmq_itr = sc->sc_itr_init;
   7170 		wmq->wmq_set_itr = true;
   7171 
   7172 		mutex_enter(txq->txq_lock);
   7173 		wm_init_tx_queue(sc, wmq, txq);
   7174 		mutex_exit(txq->txq_lock);
   7175 
   7176 		mutex_enter(rxq->rxq_lock);
   7177 		error = wm_init_rx_queue(sc, wmq, rxq);
   7178 		mutex_exit(rxq->rxq_lock);
   7179 		if (error)
   7180 			break;
   7181 	}
   7182 
   7183 	return error;
   7184 }
   7185 
   7186 /*
   7187  * wm_tx_offload:
   7188  *
   7189  *	Set up TCP/IP checksumming parameters for the
   7190  *	specified packet.
   7191  */
   7192 static int
   7193 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7194     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7195 {
   7196 	struct mbuf *m0 = txs->txs_mbuf;
   7197 	struct livengood_tcpip_ctxdesc *t;
   7198 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7199 	uint32_t ipcse;
   7200 	struct ether_header *eh;
   7201 	int offset, iphl;
   7202 	uint8_t fields;
   7203 
   7204 	/*
   7205 	 * XXX It would be nice if the mbuf pkthdr had offset
   7206 	 * fields for the protocol headers.
   7207 	 */
   7208 
   7209 	eh = mtod(m0, struct ether_header *);
   7210 	switch (htons(eh->ether_type)) {
   7211 	case ETHERTYPE_IP:
   7212 	case ETHERTYPE_IPV6:
   7213 		offset = ETHER_HDR_LEN;
   7214 		break;
   7215 
   7216 	case ETHERTYPE_VLAN:
   7217 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7218 		break;
   7219 
   7220 	default:
   7221 		/*
   7222 		 * Don't support this protocol or encapsulation.
   7223 		 */
   7224 		*fieldsp = 0;
   7225 		*cmdp = 0;
   7226 		return 0;
   7227 	}
   7228 
   7229 	if ((m0->m_pkthdr.csum_flags &
   7230 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7231 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7232 	} else
   7233 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7234 
   7235 	ipcse = offset + iphl - 1;
   7236 
   7237 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7238 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7239 	seg = 0;
   7240 	fields = 0;
   7241 
   7242 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7243 		int hlen = offset + iphl;
   7244 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7245 
   7246 		if (__predict_false(m0->m_len <
   7247 				    (hlen + sizeof(struct tcphdr)))) {
   7248 			/*
   7249 			 * TCP/IP headers are not in the first mbuf; we need
   7250 			 * to do this the slow and painful way. Let's just
   7251 			 * hope this doesn't happen very often.
   7252 			 */
   7253 			struct tcphdr th;
   7254 
   7255 			WM_Q_EVCNT_INCR(txq, tsopain);
   7256 
   7257 			m_copydata(m0, hlen, sizeof(th), &th);
   7258 			if (v4) {
   7259 				struct ip ip;
   7260 
   7261 				m_copydata(m0, offset, sizeof(ip), &ip);
   7262 				ip.ip_len = 0;
   7263 				m_copyback(m0,
   7264 				    offset + offsetof(struct ip, ip_len),
   7265 				    sizeof(ip.ip_len), &ip.ip_len);
   7266 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7267 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7268 			} else {
   7269 				struct ip6_hdr ip6;
   7270 
   7271 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7272 				ip6.ip6_plen = 0;
   7273 				m_copyback(m0,
   7274 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7275 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7276 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7277 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7278 			}
   7279 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7280 			    sizeof(th.th_sum), &th.th_sum);
   7281 
   7282 			hlen += th.th_off << 2;
   7283 		} else {
   7284 			/*
   7285 			 * TCP/IP headers are in the first mbuf; we can do
   7286 			 * this the easy way.
   7287 			 */
   7288 			struct tcphdr *th;
   7289 
   7290 			if (v4) {
   7291 				struct ip *ip =
   7292 				    (void *)(mtod(m0, char *) + offset);
   7293 				th = (void *)(mtod(m0, char *) + hlen);
   7294 
   7295 				ip->ip_len = 0;
   7296 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7297 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7298 			} else {
   7299 				struct ip6_hdr *ip6 =
   7300 				    (void *)(mtod(m0, char *) + offset);
   7301 				th = (void *)(mtod(m0, char *) + hlen);
   7302 
   7303 				ip6->ip6_plen = 0;
   7304 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7305 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7306 			}
   7307 			hlen += th->th_off << 2;
   7308 		}
   7309 
   7310 		if (v4) {
   7311 			WM_Q_EVCNT_INCR(txq, tso);
   7312 			cmdlen |= WTX_TCPIP_CMD_IP;
   7313 		} else {
   7314 			WM_Q_EVCNT_INCR(txq, tso6);
   7315 			ipcse = 0;
   7316 		}
   7317 		cmd |= WTX_TCPIP_CMD_TSE;
   7318 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7319 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7320 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7321 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7322 	}
   7323 
   7324 	/*
   7325 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7326 	 * offload feature, if we load the context descriptor, we
   7327 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7328 	 */
   7329 
   7330 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7331 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7332 	    WTX_TCPIP_IPCSE(ipcse);
   7333 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7334 		WM_Q_EVCNT_INCR(txq, ipsum);
   7335 		fields |= WTX_IXSM;
   7336 	}
   7337 
   7338 	offset += iphl;
   7339 
   7340 	if (m0->m_pkthdr.csum_flags &
   7341 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7342 		WM_Q_EVCNT_INCR(txq, tusum);
   7343 		fields |= WTX_TXSM;
   7344 		tucs = WTX_TCPIP_TUCSS(offset) |
   7345 		    WTX_TCPIP_TUCSO(offset +
   7346 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7347 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7348 	} else if ((m0->m_pkthdr.csum_flags &
   7349 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7350 		WM_Q_EVCNT_INCR(txq, tusum6);
   7351 		fields |= WTX_TXSM;
   7352 		tucs = WTX_TCPIP_TUCSS(offset) |
   7353 		    WTX_TCPIP_TUCSO(offset +
   7354 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7355 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7356 	} else {
   7357 		/* Just initialize it to a valid TCP context. */
   7358 		tucs = WTX_TCPIP_TUCSS(offset) |
   7359 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7360 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7361 	}
   7362 
   7363 	/*
    7364 	 * We don't have to write a context descriptor for every packet,
    7365 	 * except on the 82574: when two descriptor queues are used, the
    7366 	 * 82574 requires a context descriptor for every packet.
    7367 	 * Writing a context descriptor for every packet adds overhead,
    7368 	 * but it does not cause problems.
   7369 	 */
   7370 	/* Fill in the context descriptor. */
   7371 	t = (struct livengood_tcpip_ctxdesc *)
   7372 	    &txq->txq_descs[txq->txq_next];
   7373 	t->tcpip_ipcs = htole32(ipcs);
   7374 	t->tcpip_tucs = htole32(tucs);
   7375 	t->tcpip_cmdlen = htole32(cmdlen);
   7376 	t->tcpip_seg = htole32(seg);
   7377 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7378 
   7379 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7380 	txs->txs_ndesc++;
   7381 
   7382 	*cmdp = cmd;
   7383 	*fieldsp = fields;
   7384 
   7385 	return 0;
   7386 }
   7387 
   7388 static inline int
   7389 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7390 {
   7391 	struct wm_softc *sc = ifp->if_softc;
   7392 	u_int cpuid = cpu_index(curcpu());
   7393 
   7394 	/*
    7395 	 * Currently, a simple distribution strategy.
    7396 	 * TODO:
    7397 	 * distribute by flowid (RSS hash value).
   7398 	 */
   7399 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7400 }
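         
         #if 0
         /*
          * An illustrative sketch of the flowid distribution mentioned in the
          * TODO above.  "m_pkthdr.flowid" is an assumed (FreeBSD-style) field
          * carrying the RSS hash; the mbuf packet header here has no such
          * field, so this is a sketch only.  Hashing by flow keeps each flow's
          * packets on a single queue, preserving per-flow packet ordering.
          */
         static inline int
         wm_select_txqueue_by_flowid(struct ifnet *ifp, struct mbuf *m)
         {
         	struct wm_softc *sc = ifp->if_softc;
         
         	return m->m_pkthdr.flowid % sc->sc_nqueues;
         }
         #endif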
   7401 
   7402 /*
   7403  * wm_start:		[ifnet interface function]
   7404  *
   7405  *	Start packet transmission on the interface.
   7406  */
   7407 static void
   7408 wm_start(struct ifnet *ifp)
   7409 {
   7410 	struct wm_softc *sc = ifp->if_softc;
   7411 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7412 
   7413 #ifdef WM_MPSAFE
   7414 	KASSERT(if_is_mpsafe(ifp));
   7415 #endif
   7416 	/*
   7417 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7418 	 */
   7419 
   7420 	mutex_enter(txq->txq_lock);
   7421 	if (!txq->txq_stopping)
   7422 		wm_start_locked(ifp);
   7423 	mutex_exit(txq->txq_lock);
   7424 }
   7425 
   7426 static void
   7427 wm_start_locked(struct ifnet *ifp)
   7428 {
   7429 	struct wm_softc *sc = ifp->if_softc;
   7430 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7431 
   7432 	wm_send_common_locked(ifp, txq, false);
   7433 }
   7434 
   7435 static int
   7436 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7437 {
   7438 	int qid;
   7439 	struct wm_softc *sc = ifp->if_softc;
   7440 	struct wm_txqueue *txq;
   7441 
   7442 	qid = wm_select_txqueue(ifp, m);
   7443 	txq = &sc->sc_queue[qid].wmq_txq;
   7444 
   7445 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7446 		m_freem(m);
   7447 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7448 		return ENOBUFS;
   7449 	}
   7450 
   7451 	/*
   7452 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7453 	 */
   7454 	ifp->if_obytes += m->m_pkthdr.len;
   7455 	if (m->m_flags & M_MCAST)
   7456 		ifp->if_omcasts++;
   7457 
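         	/*
         	 * If the lock is contended, the packet just queued on txq_interq
         	 * will be drained by the current lock holder, or by a later
         	 * transmit or Tx interrupt, so a failed tryenter is not a loss.
         	 */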
   7458 	if (mutex_tryenter(txq->txq_lock)) {
   7459 		if (!txq->txq_stopping)
   7460 			wm_transmit_locked(ifp, txq);
   7461 		mutex_exit(txq->txq_lock);
   7462 	}
   7463 
   7464 	return 0;
   7465 }
   7466 
   7467 static void
   7468 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7469 {
   7470 
   7471 	wm_send_common_locked(ifp, txq, true);
   7472 }
   7473 
   7474 static void
   7475 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7476     bool is_transmit)
   7477 {
   7478 	struct wm_softc *sc = ifp->if_softc;
   7479 	struct mbuf *m0;
   7480 	struct wm_txsoft *txs;
   7481 	bus_dmamap_t dmamap;
   7482 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7483 	bus_addr_t curaddr;
   7484 	bus_size_t seglen, curlen;
   7485 	uint32_t cksumcmd;
   7486 	uint8_t cksumfields;
   7487 	bool remap = true;
   7488 
   7489 	KASSERT(mutex_owned(txq->txq_lock));
   7490 
   7491 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7492 		return;
   7493 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7494 		return;
   7495 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7496 		return;
   7497 
   7498 	/* Remember the previous number of free descriptors. */
   7499 	ofree = txq->txq_free;
   7500 
   7501 	/*
   7502 	 * Loop through the send queue, setting up transmit descriptors
   7503 	 * until we drain the queue, or use up all available transmit
   7504 	 * descriptors.
   7505 	 */
   7506 	for (;;) {
   7507 		m0 = NULL;
   7508 
   7509 		/* Get a work queue entry. */
   7510 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7511 			wm_txeof(txq, UINT_MAX);
   7512 			if (txq->txq_sfree == 0) {
   7513 				DPRINTF(WM_DEBUG_TX,
   7514 				    ("%s: TX: no free job descriptors\n",
   7515 					device_xname(sc->sc_dev)));
   7516 				WM_Q_EVCNT_INCR(txq, txsstall);
   7517 				break;
   7518 			}
   7519 		}
   7520 
   7521 		/* Grab a packet off the queue. */
   7522 		if (is_transmit)
   7523 			m0 = pcq_get(txq->txq_interq);
   7524 		else
   7525 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7526 		if (m0 == NULL)
   7527 			break;
   7528 
   7529 		DPRINTF(WM_DEBUG_TX,
   7530 		    ("%s: TX: have packet to transmit: %p\n",
   7531 			device_xname(sc->sc_dev), m0));
   7532 
   7533 		txs = &txq->txq_soft[txq->txq_snext];
   7534 		dmamap = txs->txs_dmamap;
   7535 
   7536 		use_tso = (m0->m_pkthdr.csum_flags &
   7537 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7538 
   7539 		/*
   7540 		 * So says the Linux driver:
   7541 		 * The controller does a simple calculation to make sure
   7542 		 * there is enough room in the FIFO before initiating the
   7543 		 * DMA for each buffer. The calc is:
   7544 		 *	4 = ceil(buffer len / MSS)
   7545 		 * To make sure we don't overrun the FIFO, adjust the max
   7546 		 * buffer len if the MSS drops.
   7547 		 */
   7548 		dmamap->dm_maxsegsz =
   7549 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7550 		    ? m0->m_pkthdr.segsz << 2
   7551 		    : WTX_MAX_LEN;
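         		/*
         		 * Worked example: with an MSS (segsz) of 576, each DMA
         		 * segment is capped at 576 << 2 = 2304 bytes, so
         		 * ceil(2304 / 576) = 4 matches the controller's FIFO
         		 * headroom calculation quoted above.
         		 */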
   7552 
   7553 		/*
   7554 		 * Load the DMA map.  If this fails, the packet either
   7555 		 * didn't fit in the allotted number of segments, or we
   7556 		 * were short on resources.  For the too-many-segments
   7557 		 * case, we simply report an error and drop the packet,
   7558 		 * since we can't sanely copy a jumbo packet to a single
   7559 		 * buffer.
   7560 		 */
   7561 retry:
   7562 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7563 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7564 		if (__predict_false(error)) {
   7565 			if (error == EFBIG) {
   7566 				if (remap == true) {
   7567 					struct mbuf *m;
   7568 
   7569 					remap = false;
   7570 					m = m_defrag(m0, M_NOWAIT);
   7571 					if (m != NULL) {
   7572 						WM_Q_EVCNT_INCR(txq, defrag);
   7573 						m0 = m;
   7574 						goto retry;
   7575 					}
   7576 				}
   7577 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7578 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7579 				    "DMA segments, dropping...\n",
   7580 				    device_xname(sc->sc_dev));
   7581 				wm_dump_mbuf_chain(sc, m0);
   7582 				m_freem(m0);
   7583 				continue;
   7584 			}
   7585 			/*  Short on resources, just stop for now. */
   7586 			DPRINTF(WM_DEBUG_TX,
   7587 			    ("%s: TX: dmamap load failed: %d\n",
   7588 				device_xname(sc->sc_dev), error));
   7589 			break;
   7590 		}
   7591 
   7592 		segs_needed = dmamap->dm_nsegs;
   7593 		if (use_tso) {
   7594 			/* For sentinel descriptor; see below. */
   7595 			segs_needed++;
   7596 		}
   7597 
   7598 		/*
   7599 		 * Ensure we have enough descriptors free to describe
   7600 		 * the packet. Note, we always reserve one descriptor
   7601 		 * at the end of the ring due to the semantics of the
   7602 		 * TDT register, plus one more in the event we need
   7603 		 * to load offload context.
   7604 		 */
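         		/*
         		 * E.g. a packet with 4 DMA segments plus the TSO sentinel
         		 * (segs_needed = 5) requires txq_free >= 7: five data
         		 * descriptors, one ring slot left empty for the TDT
         		 * semantics, and one for a possible context descriptor.
         		 */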
   7605 		if (segs_needed > txq->txq_free - 2) {
   7606 			/*
   7607 			 * Not enough free descriptors to transmit this
   7608 			 * packet.  We haven't committed anything yet,
   7609 			 * so just unload the DMA map, put the packet
   7610 			 * pack on the queue, and punt. Notify the upper
    7611 			 * back on the queue, and punt. Notify the upper
   7612 			 */
   7613 			DPRINTF(WM_DEBUG_TX,
   7614 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7615 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7616 				segs_needed, txq->txq_free - 1));
   7617 			if (!is_transmit)
   7618 				ifp->if_flags |= IFF_OACTIVE;
   7619 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7620 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7621 			WM_Q_EVCNT_INCR(txq, txdstall);
   7622 			break;
   7623 		}
   7624 
   7625 		/*
   7626 		 * Check for 82547 Tx FIFO bug. We need to do this
   7627 		 * once we know we can transmit the packet, since we
   7628 		 * do some internal FIFO space accounting here.
   7629 		 */
   7630 		if (sc->sc_type == WM_T_82547 &&
   7631 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7632 			DPRINTF(WM_DEBUG_TX,
   7633 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7634 				device_xname(sc->sc_dev)));
   7635 			if (!is_transmit)
   7636 				ifp->if_flags |= IFF_OACTIVE;
   7637 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7638 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7639 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7640 			break;
   7641 		}
   7642 
   7643 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7644 
   7645 		DPRINTF(WM_DEBUG_TX,
   7646 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7647 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7648 
   7649 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7650 
   7651 		/*
   7652 		 * Store a pointer to the packet so that we can free it
   7653 		 * later.
   7654 		 *
   7655 		 * Initially, we consider the number of descriptors the
   7656 		 * packet uses the number of DMA segments.  This may be
   7657 		 * incremented by 1 if we do checksum offload (a descriptor
   7658 		 * is used to set the checksum context).
   7659 		 */
   7660 		txs->txs_mbuf = m0;
   7661 		txs->txs_firstdesc = txq->txq_next;
   7662 		txs->txs_ndesc = segs_needed;
   7663 
   7664 		/* Set up offload parameters for this packet. */
   7665 		if (m0->m_pkthdr.csum_flags &
   7666 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7667 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7668 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7669 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7670 					  &cksumfields) != 0) {
   7671 				/* Error message already displayed. */
   7672 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7673 				continue;
   7674 			}
   7675 		} else {
   7676 			cksumcmd = 0;
   7677 			cksumfields = 0;
   7678 		}
   7679 
   7680 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7681 
   7682 		/* Sync the DMA map. */
   7683 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7684 		    BUS_DMASYNC_PREWRITE);
   7685 
   7686 		/* Initialize the transmit descriptor. */
   7687 		for (nexttx = txq->txq_next, seg = 0;
   7688 		     seg < dmamap->dm_nsegs; seg++) {
   7689 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7690 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7691 			     seglen != 0;
   7692 			     curaddr += curlen, seglen -= curlen,
   7693 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7694 				curlen = seglen;
   7695 
   7696 				/*
    7697 				 * So says the Linux driver: work around
    7698 				 * premature descriptor write-backs in TSO
    7699 				 * mode by appending a 4-byte sentinel; the
    7700 				 * bytes trimmed below form a final descriptor.
   7701 				 */
   7702 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7703 				    curlen > 8)
   7704 					curlen -= 4;
   7705 
   7706 				wm_set_dma_addr(
   7707 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7708 				txq->txq_descs[nexttx].wtx_cmdlen
   7709 				    = htole32(cksumcmd | curlen);
   7710 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7711 				    = 0;
   7712 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7713 				    = cksumfields;
    7714 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7715 				lasttx = nexttx;
   7716 
   7717 				DPRINTF(WM_DEBUG_TX,
   7718 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7719 					"len %#04zx\n",
   7720 					device_xname(sc->sc_dev), nexttx,
   7721 					(uint64_t)curaddr, curlen));
   7722 			}
   7723 		}
   7724 
   7725 		KASSERT(lasttx != -1);
   7726 
   7727 		/*
   7728 		 * Set up the command byte on the last descriptor of
   7729 		 * the packet. If we're in the interrupt delay window,
   7730 		 * delay the interrupt.
   7731 		 */
   7732 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7733 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7734 
   7735 		/*
   7736 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7737 		 * up the descriptor to encapsulate the packet for us.
   7738 		 *
   7739 		 * This is only valid on the last descriptor of the packet.
   7740 		 */
   7741 		if (vlan_has_tag(m0)) {
   7742 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7743 			    htole32(WTX_CMD_VLE);
   7744 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7745 			    = htole16(vlan_get_tag(m0));
   7746 		}
   7747 
   7748 		txs->txs_lastdesc = lasttx;
   7749 
   7750 		DPRINTF(WM_DEBUG_TX,
   7751 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7752 			device_xname(sc->sc_dev),
   7753 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7754 
   7755 		/* Sync the descriptors we're using. */
   7756 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7757 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7758 
   7759 		/* Give the packet to the chip. */
   7760 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7761 
   7762 		DPRINTF(WM_DEBUG_TX,
   7763 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7764 
   7765 		DPRINTF(WM_DEBUG_TX,
   7766 		    ("%s: TX: finished transmitting packet, job %d\n",
   7767 			device_xname(sc->sc_dev), txq->txq_snext));
   7768 
   7769 		/* Advance the tx pointer. */
   7770 		txq->txq_free -= txs->txs_ndesc;
   7771 		txq->txq_next = nexttx;
   7772 
   7773 		txq->txq_sfree--;
   7774 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7775 
   7776 		/* Pass the packet to any BPF listeners. */
   7777 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7778 	}
   7779 
   7780 	if (m0 != NULL) {
   7781 		if (!is_transmit)
   7782 			ifp->if_flags |= IFF_OACTIVE;
   7783 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7784 		WM_Q_EVCNT_INCR(txq, descdrop);
   7785 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7786 			__func__));
   7787 		m_freem(m0);
   7788 	}
   7789 
   7790 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7791 		/* No more slots; notify upper layer. */
   7792 		if (!is_transmit)
   7793 			ifp->if_flags |= IFF_OACTIVE;
   7794 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7795 	}
   7796 
   7797 	if (txq->txq_free != ofree) {
   7798 		/* Set a watchdog timer in case the chip flakes out. */
   7799 		txq->txq_lastsent = time_uptime;
   7800 		txq->txq_sending = true;
   7801 	}
   7802 }
   7803 
   7804 /*
   7805  * wm_nq_tx_offload:
   7806  *
   7807  *	Set up TCP/IP checksumming parameters for the
   7808  *	specified packet, for NEWQUEUE devices
   7809  */
   7810 static int
   7811 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7812     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7813 {
   7814 	struct mbuf *m0 = txs->txs_mbuf;
   7815 	uint32_t vl_len, mssidx, cmdc;
   7816 	struct ether_header *eh;
   7817 	int offset, iphl;
   7818 
   7819 	/*
   7820 	 * XXX It would be nice if the mbuf pkthdr had offset
   7821 	 * fields for the protocol headers.
   7822 	 */
   7823 	*cmdlenp = 0;
   7824 	*fieldsp = 0;
   7825 
   7826 	eh = mtod(m0, struct ether_header *);
   7827 	switch (htons(eh->ether_type)) {
   7828 	case ETHERTYPE_IP:
   7829 	case ETHERTYPE_IPV6:
   7830 		offset = ETHER_HDR_LEN;
   7831 		break;
   7832 
   7833 	case ETHERTYPE_VLAN:
   7834 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7835 		break;
   7836 
   7837 	default:
   7838 		/* Don't support this protocol or encapsulation. */
   7839 		*do_csum = false;
   7840 		return 0;
   7841 	}
   7842 	*do_csum = true;
   7843 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7844 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7845 
   7846 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7847 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7848 
   7849 	if ((m0->m_pkthdr.csum_flags &
   7850 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7851 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7852 	} else {
   7853 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7854 	}
   7855 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7856 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7857 
   7858 	if (vlan_has_tag(m0)) {
   7859 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7860 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7861 		*cmdlenp |= NQTX_CMD_VLE;
   7862 	}
   7863 
   7864 	mssidx = 0;
   7865 
   7866 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7867 		int hlen = offset + iphl;
   7868 		int tcp_hlen;
   7869 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7870 
   7871 		if (__predict_false(m0->m_len <
   7872 				    (hlen + sizeof(struct tcphdr)))) {
   7873 			/*
   7874 			 * TCP/IP headers are not in the first mbuf; we need
   7875 			 * to do this the slow and painful way. Let's just
   7876 			 * hope this doesn't happen very often.
   7877 			 */
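         			/*
         			 * For TSO the chip expects ip_len/ip6_plen
         			 * zeroed and th_sum seeded with the pseudo-
         			 * header checksum computed without the length,
         			 * roughly cksum(src, dst, proto, len = 0); the
         			 * chip then fixes up both fields per segment.
         			 */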
   7878 			struct tcphdr th;
   7879 
   7880 			WM_Q_EVCNT_INCR(txq, tsopain);
   7881 
   7882 			m_copydata(m0, hlen, sizeof(th), &th);
   7883 			if (v4) {
   7884 				struct ip ip;
   7885 
   7886 				m_copydata(m0, offset, sizeof(ip), &ip);
   7887 				ip.ip_len = 0;
   7888 				m_copyback(m0,
   7889 				    offset + offsetof(struct ip, ip_len),
   7890 				    sizeof(ip.ip_len), &ip.ip_len);
   7891 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7892 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7893 			} else {
   7894 				struct ip6_hdr ip6;
   7895 
   7896 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7897 				ip6.ip6_plen = 0;
   7898 				m_copyback(m0,
   7899 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7900 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7901 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7902 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7903 			}
   7904 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7905 			    sizeof(th.th_sum), &th.th_sum);
   7906 
   7907 			tcp_hlen = th.th_off << 2;
   7908 		} else {
   7909 			/*
   7910 			 * TCP/IP headers are in the first mbuf; we can do
   7911 			 * this the easy way.
   7912 			 */
   7913 			struct tcphdr *th;
   7914 
   7915 			if (v4) {
   7916 				struct ip *ip =
   7917 				    (void *)(mtod(m0, char *) + offset);
   7918 				th = (void *)(mtod(m0, char *) + hlen);
   7919 
   7920 				ip->ip_len = 0;
   7921 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7922 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7923 			} else {
   7924 				struct ip6_hdr *ip6 =
   7925 				    (void *)(mtod(m0, char *) + offset);
   7926 				th = (void *)(mtod(m0, char *) + hlen);
   7927 
   7928 				ip6->ip6_plen = 0;
   7929 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7930 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7931 			}
   7932 			tcp_hlen = th->th_off << 2;
   7933 		}
   7934 		hlen += tcp_hlen;
   7935 		*cmdlenp |= NQTX_CMD_TSE;
   7936 
   7937 		if (v4) {
   7938 			WM_Q_EVCNT_INCR(txq, tso);
   7939 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7940 		} else {
   7941 			WM_Q_EVCNT_INCR(txq, tso6);
   7942 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7943 		}
   7944 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7945 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7946 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7947 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7948 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7949 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7950 	} else {
   7951 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7952 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7953 	}
   7954 
   7955 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7956 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7957 		cmdc |= NQTXC_CMD_IP4;
   7958 	}
   7959 
   7960 	if (m0->m_pkthdr.csum_flags &
   7961 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7962 		WM_Q_EVCNT_INCR(txq, tusum);
   7963 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7964 			cmdc |= NQTXC_CMD_TCP;
   7965 		else
   7966 			cmdc |= NQTXC_CMD_UDP;
   7967 
   7968 		cmdc |= NQTXC_CMD_IP4;
   7969 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7970 	}
   7971 	if (m0->m_pkthdr.csum_flags &
   7972 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7973 		WM_Q_EVCNT_INCR(txq, tusum6);
   7974 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7975 			cmdc |= NQTXC_CMD_TCP;
   7976 		else
   7977 			cmdc |= NQTXC_CMD_UDP;
   7978 
   7979 		cmdc |= NQTXC_CMD_IP6;
   7980 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7981 	}
   7982 
   7983 	/*
    7984 	 * We don't have to write a context descriptor for every packet on
    7985 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7986 	 * I210 and I211. Writing one per Tx queue would be enough for
    7987 	 * these controllers.
    7988 	 * Writing a context descriptor for every packet adds overhead,
    7989 	 * but it does not cause problems.
   7990 	 */
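         	/* This driver takes the simple approach: write one per packet. */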
   7991 	/* Fill in the context descriptor. */
   7992 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7993 	    htole32(vl_len);
   7994 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7995 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7996 	    htole32(cmdc);
   7997 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7998 	    htole32(mssidx);
   7999 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8000 	DPRINTF(WM_DEBUG_TX,
   8001 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8002 		txq->txq_next, 0, vl_len));
   8003 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8004 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8005 	txs->txs_ndesc++;
   8006 	return 0;
   8007 }
   8008 
   8009 /*
   8010  * wm_nq_start:		[ifnet interface function]
   8011  *
   8012  *	Start packet transmission on the interface for NEWQUEUE devices
   8013  */
   8014 static void
   8015 wm_nq_start(struct ifnet *ifp)
   8016 {
   8017 	struct wm_softc *sc = ifp->if_softc;
   8018 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8019 
   8020 #ifdef WM_MPSAFE
   8021 	KASSERT(if_is_mpsafe(ifp));
   8022 #endif
   8023 	/*
   8024 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8025 	 */
   8026 
   8027 	mutex_enter(txq->txq_lock);
   8028 	if (!txq->txq_stopping)
   8029 		wm_nq_start_locked(ifp);
   8030 	mutex_exit(txq->txq_lock);
   8031 }
   8032 
   8033 static void
   8034 wm_nq_start_locked(struct ifnet *ifp)
   8035 {
   8036 	struct wm_softc *sc = ifp->if_softc;
   8037 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8038 
   8039 	wm_nq_send_common_locked(ifp, txq, false);
   8040 }
   8041 
   8042 static int
   8043 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8044 {
   8045 	int qid;
   8046 	struct wm_softc *sc = ifp->if_softc;
   8047 	struct wm_txqueue *txq;
   8048 
   8049 	qid = wm_select_txqueue(ifp, m);
   8050 	txq = &sc->sc_queue[qid].wmq_txq;
   8051 
   8052 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8053 		m_freem(m);
   8054 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8055 		return ENOBUFS;
   8056 	}
   8057 
   8058 	/*
   8059 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8060 	 */
   8061 	ifp->if_obytes += m->m_pkthdr.len;
   8062 	if (m->m_flags & M_MCAST)
   8063 		ifp->if_omcasts++;
   8064 
   8065 	/*
    8066 	 * There are two situations in which this mutex_tryenter() can
    8067 	 * fail at run time:
    8068 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8069 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8070 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8071 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8072 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8073 	 * dequeued by wm_deferred_start_locked(), so it does not get
    8074 	 * stuck, either.
   8075 	 */
   8076 	if (mutex_tryenter(txq->txq_lock)) {
   8077 		if (!txq->txq_stopping)
   8078 			wm_nq_transmit_locked(ifp, txq);
   8079 		mutex_exit(txq->txq_lock);
   8080 	}
   8081 
   8082 	return 0;
   8083 }
   8084 
   8085 static void
   8086 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8087 {
   8088 
   8089 	wm_nq_send_common_locked(ifp, txq, true);
   8090 }
   8091 
   8092 static void
   8093 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8094     bool is_transmit)
   8095 {
   8096 	struct wm_softc *sc = ifp->if_softc;
   8097 	struct mbuf *m0;
   8098 	struct wm_txsoft *txs;
   8099 	bus_dmamap_t dmamap;
   8100 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8101 	bool do_csum, sent;
   8102 	bool remap = true;
   8103 
   8104 	KASSERT(mutex_owned(txq->txq_lock));
   8105 
   8106 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8107 		return;
   8108 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8109 		return;
   8110 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8111 		return;
   8112 
   8113 	sent = false;
   8114 
   8115 	/*
   8116 	 * Loop through the send queue, setting up transmit descriptors
   8117 	 * until we drain the queue, or use up all available transmit
   8118 	 * descriptors.
   8119 	 */
   8120 	for (;;) {
   8121 		m0 = NULL;
   8122 
   8123 		/* Get a work queue entry. */
   8124 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8125 			wm_txeof(txq, UINT_MAX);
   8126 			if (txq->txq_sfree == 0) {
   8127 				DPRINTF(WM_DEBUG_TX,
   8128 				    ("%s: TX: no free job descriptors\n",
   8129 					device_xname(sc->sc_dev)));
   8130 				WM_Q_EVCNT_INCR(txq, txsstall);
   8131 				break;
   8132 			}
   8133 		}
   8134 
   8135 		/* Grab a packet off the queue. */
   8136 		if (is_transmit)
   8137 			m0 = pcq_get(txq->txq_interq);
   8138 		else
   8139 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8140 		if (m0 == NULL)
   8141 			break;
   8142 
   8143 		DPRINTF(WM_DEBUG_TX,
   8144 		    ("%s: TX: have packet to transmit: %p\n",
   8145 		    device_xname(sc->sc_dev), m0));
   8146 
   8147 		txs = &txq->txq_soft[txq->txq_snext];
   8148 		dmamap = txs->txs_dmamap;
   8149 
   8150 		/*
   8151 		 * Load the DMA map.  If this fails, the packet either
   8152 		 * didn't fit in the allotted number of segments, or we
   8153 		 * were short on resources.  For the too-many-segments
   8154 		 * case, we simply report an error and drop the packet,
   8155 		 * since we can't sanely copy a jumbo packet to a single
   8156 		 * buffer.
   8157 		 */
   8158 retry:
   8159 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8160 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8161 		if (__predict_false(error)) {
   8162 			if (error == EFBIG) {
   8163 				if (remap == true) {
   8164 					struct mbuf *m;
   8165 
   8166 					remap = false;
   8167 					m = m_defrag(m0, M_NOWAIT);
   8168 					if (m != NULL) {
   8169 						WM_Q_EVCNT_INCR(txq, defrag);
   8170 						m0 = m;
   8171 						goto retry;
   8172 					}
   8173 				}
   8174 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8175 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8176 				    "DMA segments, dropping...\n",
   8177 				    device_xname(sc->sc_dev));
   8178 				wm_dump_mbuf_chain(sc, m0);
   8179 				m_freem(m0);
   8180 				continue;
   8181 			}
   8182 			/* Short on resources, just stop for now. */
   8183 			DPRINTF(WM_DEBUG_TX,
   8184 			    ("%s: TX: dmamap load failed: %d\n",
   8185 				device_xname(sc->sc_dev), error));
   8186 			break;
   8187 		}
   8188 
   8189 		segs_needed = dmamap->dm_nsegs;
   8190 
   8191 		/*
   8192 		 * Ensure we have enough descriptors free to describe
   8193 		 * the packet. Note, we always reserve one descriptor
   8194 		 * at the end of the ring due to the semantics of the
   8195 		 * TDT register, plus one more in the event we need
   8196 		 * to load offload context.
   8197 		 */
   8198 		if (segs_needed > txq->txq_free - 2) {
   8199 			/*
   8200 			 * Not enough free descriptors to transmit this
   8201 			 * packet.  We haven't committed anything yet,
    8202 			 * so just unload the DMA map, put the packet
    8203 			 * back on the queue, and punt. Notify the upper
   8204 			 * layer that there are no more slots left.
   8205 			 */
   8206 			DPRINTF(WM_DEBUG_TX,
   8207 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8208 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8209 				segs_needed, txq->txq_free - 1));
   8210 			if (!is_transmit)
   8211 				ifp->if_flags |= IFF_OACTIVE;
   8212 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8213 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8214 			WM_Q_EVCNT_INCR(txq, txdstall);
   8215 			break;
   8216 		}
   8217 
   8218 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8219 
   8220 		DPRINTF(WM_DEBUG_TX,
   8221 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8222 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8223 
   8224 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8225 
   8226 		/*
   8227 		 * Store a pointer to the packet so that we can free it
   8228 		 * later.
   8229 		 *
    8230 		 * Initially, we consider the number of descriptors the
    8231 		 * packet uses to be the number of DMA segments.  This may be
   8232 		 * incremented by 1 if we do checksum offload (a descriptor
   8233 		 * is used to set the checksum context).
   8234 		 */
   8235 		txs->txs_mbuf = m0;
   8236 		txs->txs_firstdesc = txq->txq_next;
   8237 		txs->txs_ndesc = segs_needed;
   8238 
   8239 		/* Set up offload parameters for this packet. */
   8240 		uint32_t cmdlen, fields, dcmdlen;
   8241 		if (m0->m_pkthdr.csum_flags &
   8242 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8243 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8244 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8245 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8246 			    &do_csum) != 0) {
   8247 				/* Error message already displayed. */
   8248 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8249 				continue;
   8250 			}
   8251 		} else {
   8252 			do_csum = false;
   8253 			cmdlen = 0;
   8254 			fields = 0;
   8255 		}
   8256 
   8257 		/* Sync the DMA map. */
   8258 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8259 		    BUS_DMASYNC_PREWRITE);
   8260 
   8261 		/* Initialize the first transmit descriptor. */
   8262 		nexttx = txq->txq_next;
   8263 		if (!do_csum) {
    8264 			/* Set up a legacy descriptor. */
   8265 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8266 			    dmamap->dm_segs[0].ds_addr);
   8267 			txq->txq_descs[nexttx].wtx_cmdlen =
   8268 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8269 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8270 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8271 			if (vlan_has_tag(m0)) {
   8272 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8273 				    htole32(WTX_CMD_VLE);
   8274 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8275 				    htole16(vlan_get_tag(m0));
   8276 			} else
   8277 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8278 
   8279 			dcmdlen = 0;
   8280 		} else {
    8281 			/* Set up an advanced data descriptor. */
   8282 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8283 			    htole64(dmamap->dm_segs[0].ds_addr);
   8284 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8285 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8286 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8287 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8288 			    htole32(fields);
   8289 			DPRINTF(WM_DEBUG_TX,
   8290 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8291 				device_xname(sc->sc_dev), nexttx,
   8292 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8293 			DPRINTF(WM_DEBUG_TX,
   8294 			    ("\t 0x%08x%08x\n", fields,
   8295 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8296 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8297 		}
   8298 
   8299 		lasttx = nexttx;
   8300 		nexttx = WM_NEXTTX(txq, nexttx);
   8301 		/*
    8302 		 * Fill in the next descriptors. The legacy and advanced
    8303 		 * formats are the same here.
   8304 		 */
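         		/*
         		 * This works because dcmdlen is 0 for legacy descriptors
         		 * and carries the DTYP/DEXT flags for advanced ones, and
         		 * both formats share the address/length field layout.
         		 */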
   8305 		for (seg = 1; seg < dmamap->dm_nsegs;
   8306 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8307 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8308 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8309 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8310 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8311 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8312 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8313 			lasttx = nexttx;
   8314 
   8315 			DPRINTF(WM_DEBUG_TX,
   8316 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8317 				device_xname(sc->sc_dev), nexttx,
   8318 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8319 				dmamap->dm_segs[seg].ds_len));
   8320 		}
   8321 
   8322 		KASSERT(lasttx != -1);
   8323 
   8324 		/*
   8325 		 * Set up the command byte on the last descriptor of
   8326 		 * the packet. If we're in the interrupt delay window,
   8327 		 * delay the interrupt.
   8328 		 */
   8329 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8330 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8331 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8332 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8333 
   8334 		txs->txs_lastdesc = lasttx;
   8335 
   8336 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8337 		    device_xname(sc->sc_dev),
   8338 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8339 
   8340 		/* Sync the descriptors we're using. */
   8341 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8342 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8343 
   8344 		/* Give the packet to the chip. */
   8345 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8346 		sent = true;
   8347 
   8348 		DPRINTF(WM_DEBUG_TX,
   8349 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8350 
   8351 		DPRINTF(WM_DEBUG_TX,
   8352 		    ("%s: TX: finished transmitting packet, job %d\n",
   8353 			device_xname(sc->sc_dev), txq->txq_snext));
   8354 
   8355 		/* Advance the tx pointer. */
   8356 		txq->txq_free -= txs->txs_ndesc;
   8357 		txq->txq_next = nexttx;
   8358 
   8359 		txq->txq_sfree--;
   8360 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8361 
   8362 		/* Pass the packet to any BPF listeners. */
   8363 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8364 	}
   8365 
   8366 	if (m0 != NULL) {
   8367 		if (!is_transmit)
   8368 			ifp->if_flags |= IFF_OACTIVE;
   8369 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8370 		WM_Q_EVCNT_INCR(txq, descdrop);
   8371 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8372 			__func__));
   8373 		m_freem(m0);
   8374 	}
   8375 
   8376 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8377 		/* No more slots; notify upper layer. */
   8378 		if (!is_transmit)
   8379 			ifp->if_flags |= IFF_OACTIVE;
   8380 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8381 	}
   8382 
   8383 	if (sent) {
   8384 		/* Set a watchdog timer in case the chip flakes out. */
   8385 		txq->txq_lastsent = time_uptime;
   8386 		txq->txq_sending = true;
   8387 	}
   8388 }
   8389 
   8390 static void
   8391 wm_deferred_start_locked(struct wm_txqueue *txq)
   8392 {
   8393 	struct wm_softc *sc = txq->txq_sc;
   8394 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8395 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8396 	int qid = wmq->wmq_id;
   8397 
   8398 	KASSERT(mutex_owned(txq->txq_lock));
   8399 
   8400 	if (txq->txq_stopping) {
   8401 		mutex_exit(txq->txq_lock);
   8402 		return;
   8403 	}
   8404 
   8405 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8406 		/* XXX Needed for ALTQ or single-CPU systems */
   8407 		if (qid == 0)
   8408 			wm_nq_start_locked(ifp);
   8409 		wm_nq_transmit_locked(ifp, txq);
   8410 	} else {
    8411 		/* XXX Needed for ALTQ or single-CPU systems */
   8412 		if (qid == 0)
   8413 			wm_start_locked(ifp);
   8414 		wm_transmit_locked(ifp, txq);
   8415 	}
   8416 }
   8417 
   8418 /* Interrupt */
   8419 
   8420 /*
   8421  * wm_txeof:
   8422  *
   8423  *	Helper; handle transmit interrupts.
   8424  */
   8425 static bool
   8426 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8427 {
   8428 	struct wm_softc *sc = txq->txq_sc;
   8429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8430 	struct wm_txsoft *txs;
   8431 	int count = 0;
   8432 	int i;
   8433 	uint8_t status;
   8434 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8435 	bool more = false;
   8436 
   8437 	KASSERT(mutex_owned(txq->txq_lock));
   8438 
   8439 	if (txq->txq_stopping)
   8440 		return false;
   8441 
   8442 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8443 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   8444 	if (wmq->wmq_id == 0)
   8445 		ifp->if_flags &= ~IFF_OACTIVE;
   8446 
   8447 	/*
   8448 	 * Go through the Tx list and free mbufs for those
   8449 	 * frames which have been transmitted.
   8450 	 */
   8451 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8452 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8453 		if (limit-- == 0) {
   8454 			more = true;
   8455 			DPRINTF(WM_DEBUG_TX,
   8456 			    ("%s: TX: loop limited, job %d is not processed\n",
   8457 				device_xname(sc->sc_dev), i));
   8458 			break;
   8459 		}
   8460 
   8461 		txs = &txq->txq_soft[i];
   8462 
   8463 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8464 			device_xname(sc->sc_dev), i));
   8465 
   8466 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8467 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8468 
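         		/*
         		 * If the descriptor done (DD) bit is not yet set in the
         		 * job's last descriptor, the packet is still in flight;
         		 * re-sync the descriptor for a later read and stop here.
         		 */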
   8469 		status =
   8470 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8471 		if ((status & WTX_ST_DD) == 0) {
   8472 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8473 			    BUS_DMASYNC_PREREAD);
   8474 			break;
   8475 		}
   8476 
   8477 		count++;
   8478 		DPRINTF(WM_DEBUG_TX,
   8479 		    ("%s: TX: job %d done: descs %d..%d\n",
   8480 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8481 		    txs->txs_lastdesc));
   8482 
   8483 		/*
   8484 		 * XXX We should probably be using the statistics
   8485 		 * XXX registers, but I don't know if they exist
   8486 		 * XXX on chips before the i82544.
   8487 		 */
   8488 
   8489 #ifdef WM_EVENT_COUNTERS
   8490 		if (status & WTX_ST_TU)
   8491 			WM_Q_EVCNT_INCR(txq, underrun);
   8492 #endif /* WM_EVENT_COUNTERS */
   8493 
   8494 		/*
    8495 		 * The documents for 82574 and newer say the status field has
    8496 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    8497 		 * Collision) bit (both are reserved). See the "PCIe GbE
    8498 		 * Controller Open Source Software Developer's Manual", the
    8499 		 * 82574 datasheet and newer.
    8500 		 *
    8501 		 * XXX The LC bit has been seen set on I218 even though the media
    8502 		 * was full duplex, so it might have another, undocumented meaning.
   8503 		 */
   8504 
   8505 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8506 		    && ((sc->sc_type < WM_T_82574)
   8507 			|| (sc->sc_type == WM_T_80003))) {
   8508 			ifp->if_oerrors++;
   8509 			if (status & WTX_ST_LC)
   8510 				log(LOG_WARNING, "%s: late collision\n",
   8511 				    device_xname(sc->sc_dev));
   8512 			else if (status & WTX_ST_EC) {
   8513 				ifp->if_collisions +=
   8514 				    TX_COLLISION_THRESHOLD + 1;
   8515 				log(LOG_WARNING, "%s: excessive collisions\n",
   8516 				    device_xname(sc->sc_dev));
   8517 			}
   8518 		} else
   8519 			ifp->if_opackets++;
   8520 
   8521 		txq->txq_packets++;
   8522 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8523 
   8524 		txq->txq_free += txs->txs_ndesc;
   8525 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8526 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8527 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8528 		m_freem(txs->txs_mbuf);
   8529 		txs->txs_mbuf = NULL;
   8530 	}
   8531 
   8532 	/* Update the dirty transmit buffer pointer. */
   8533 	txq->txq_sdirty = i;
   8534 	DPRINTF(WM_DEBUG_TX,
   8535 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8536 
   8537 	if (count != 0)
   8538 		rnd_add_uint32(&sc->rnd_source, count);
   8539 
   8540 	/*
   8541 	 * If there are no more pending transmissions, cancel the watchdog
   8542 	 * timer.
   8543 	 */
   8544 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8545 		txq->txq_sending = false;
   8546 
   8547 	return more;
   8548 }
   8549 
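         /*
          * The wm_rxdesc_* helpers below hide the three Rx descriptor formats:
          * extended (82574), advanced (NEWQUEUE controllers) and legacy.
          */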
   8550 static inline uint32_t
   8551 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8552 {
   8553 	struct wm_softc *sc = rxq->rxq_sc;
   8554 
   8555 	if (sc->sc_type == WM_T_82574)
   8556 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8557 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8558 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8559 	else
   8560 		return rxq->rxq_descs[idx].wrx_status;
   8561 }
   8562 
   8563 static inline uint32_t
   8564 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8565 {
   8566 	struct wm_softc *sc = rxq->rxq_sc;
   8567 
   8568 	if (sc->sc_type == WM_T_82574)
   8569 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8570 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8571 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8572 	else
   8573 		return rxq->rxq_descs[idx].wrx_errors;
   8574 }
   8575 
   8576 static inline uint16_t
   8577 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8578 {
   8579 	struct wm_softc *sc = rxq->rxq_sc;
   8580 
   8581 	if (sc->sc_type == WM_T_82574)
   8582 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8583 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8584 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8585 	else
   8586 		return rxq->rxq_descs[idx].wrx_special;
   8587 }
   8588 
   8589 static inline int
   8590 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8591 {
   8592 	struct wm_softc *sc = rxq->rxq_sc;
   8593 
   8594 	if (sc->sc_type == WM_T_82574)
   8595 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8596 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8597 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8598 	else
   8599 		return rxq->rxq_descs[idx].wrx_len;
   8600 }
   8601 
   8602 #ifdef WM_DEBUG
   8603 static inline uint32_t
   8604 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8605 {
   8606 	struct wm_softc *sc = rxq->rxq_sc;
   8607 
   8608 	if (sc->sc_type == WM_T_82574)
   8609 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8610 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8611 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8612 	else
   8613 		return 0;
   8614 }
   8615 
   8616 static inline uint8_t
   8617 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8618 {
   8619 	struct wm_softc *sc = rxq->rxq_sc;
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8624 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8625 	else
   8626 		return 0;
   8627 }
   8628 #endif /* WM_DEBUG */
   8629 
   8630 static inline bool
   8631 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8632     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8633 {
   8634 
   8635 	if (sc->sc_type == WM_T_82574)
   8636 		return (status & ext_bit) != 0;
   8637 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8638 		return (status & nq_bit) != 0;
   8639 	else
   8640 		return (status & legacy_bit) != 0;
   8641 }
   8642 
   8643 static inline bool
   8644 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8645     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8646 {
   8647 
   8648 	if (sc->sc_type == WM_T_82574)
   8649 		return (error & ext_bit) != 0;
   8650 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8651 		return (error & nq_bit) != 0;
   8652 	else
   8653 		return (error & legacy_bit) != 0;
   8654 }
   8655 
   8656 static inline bool
   8657 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8658 {
   8659 
   8660 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8661 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8662 		return true;
   8663 	else
   8664 		return false;
   8665 }
   8666 
   8667 static inline bool
   8668 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8669 {
   8670 	struct wm_softc *sc = rxq->rxq_sc;
   8671 
   8672 	/* XXXX missing error bit for newqueue? */
   8673 	if (wm_rxdesc_is_set_error(sc, errors,
   8674 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8675 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8676 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8677 		NQRXC_ERROR_RXE)) {
   8678 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8679 		    EXTRXC_ERROR_SE, 0))
   8680 			log(LOG_WARNING, "%s: symbol error\n",
   8681 			    device_xname(sc->sc_dev));
   8682 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8683 		    EXTRXC_ERROR_SEQ, 0))
   8684 			log(LOG_WARNING, "%s: receive sequence error\n",
   8685 			    device_xname(sc->sc_dev));
   8686 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8687 		    EXTRXC_ERROR_CE, 0))
   8688 			log(LOG_WARNING, "%s: CRC error\n",
   8689 			    device_xname(sc->sc_dev));
   8690 		return true;
   8691 	}
   8692 
   8693 	return false;
   8694 }
   8695 
   8696 static inline bool
   8697 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8698 {
   8699 	struct wm_softc *sc = rxq->rxq_sc;
   8700 
   8701 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8702 		NQRXC_STATUS_DD)) {
   8703 		/* We have processed all of the receive descriptors. */
   8704 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8705 		return false;
   8706 	}
   8707 
   8708 	return true;
   8709 }
   8710 
   8711 static inline bool
   8712 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8713     uint16_t vlantag, struct mbuf *m)
   8714 {
   8715 
   8716 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8717 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8718 		vlan_set_tag(m, le16toh(vlantag));
   8719 	}
   8720 
   8721 	return true;
   8722 }
   8723 
   8724 static inline void
   8725 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8726     uint32_t errors, struct mbuf *m)
   8727 {
   8728 	struct wm_softc *sc = rxq->rxq_sc;
   8729 
   8730 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8731 		if (wm_rxdesc_is_set_status(sc, status,
   8732 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8733 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8734 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8735 			if (wm_rxdesc_is_set_error(sc, errors,
   8736 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8737 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8738 		}
   8739 		if (wm_rxdesc_is_set_status(sc, status,
   8740 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8741 			/*
   8742 			 * Note: we don't know if this was TCP or UDP,
   8743 			 * so we just set both bits, and expect the
   8744 			 * upper layers to deal.
   8745 			 */
   8746 			WM_Q_EVCNT_INCR(rxq, tusum);
   8747 			m->m_pkthdr.csum_flags |=
   8748 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8749 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8750 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8751 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8752 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8753 		}
   8754 	}
   8755 }
   8756 
   8757 /*
   8758  * wm_rxeof:
   8759  *
   8760  *	Helper; handle receive interrupts.
   8761  */
   8762 static bool
   8763 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8764 {
   8765 	struct wm_softc *sc = rxq->rxq_sc;
   8766 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8767 	struct wm_rxsoft *rxs;
   8768 	struct mbuf *m;
   8769 	int i, len;
   8770 	int count = 0;
   8771 	uint32_t status, errors;
   8772 	uint16_t vlantag;
   8773 	bool more = false;
   8774 
   8775 	KASSERT(mutex_owned(rxq->rxq_lock));
   8776 
   8777 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8778 		if (limit-- == 0) {
   8779 			rxq->rxq_ptr = i;
   8780 			more = true;
   8781 			DPRINTF(WM_DEBUG_RX,
   8782 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8783 				device_xname(sc->sc_dev), i));
   8784 			break;
   8785 		}
   8786 
   8787 		rxs = &rxq->rxq_soft[i];
   8788 
   8789 		DPRINTF(WM_DEBUG_RX,
   8790 		    ("%s: RX: checking descriptor %d\n",
   8791 			device_xname(sc->sc_dev), i));
   8792 		wm_cdrxsync(rxq, i,
   8793 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8794 
   8795 		status = wm_rxdesc_get_status(rxq, i);
   8796 		errors = wm_rxdesc_get_errors(rxq, i);
   8797 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8798 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8799 #ifdef WM_DEBUG
   8800 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8801 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8802 #endif
   8803 
   8804 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8805 			/*
    8806 			 * Update the receive pointer while holding rxq_lock
    8807 			 * so it stays consistent with the counters.
   8808 			 */
   8809 			rxq->rxq_ptr = i;
   8810 			break;
   8811 		}
   8812 
   8813 		count++;
   8814 		if (__predict_false(rxq->rxq_discard)) {
   8815 			DPRINTF(WM_DEBUG_RX,
   8816 			    ("%s: RX: discarding contents of descriptor %d\n",
   8817 				device_xname(sc->sc_dev), i));
   8818 			wm_init_rxdesc(rxq, i);
   8819 			if (wm_rxdesc_is_eop(rxq, status)) {
   8820 				/* Reset our state. */
   8821 				DPRINTF(WM_DEBUG_RX,
   8822 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8823 					device_xname(sc->sc_dev)));
   8824 				rxq->rxq_discard = 0;
   8825 			}
   8826 			continue;
   8827 		}
   8828 
   8829 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8830 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8831 
   8832 		m = rxs->rxs_mbuf;
   8833 
   8834 		/*
   8835 		 * Add a new receive buffer to the ring, unless of
   8836 		 * course the length is zero. Treat the latter as a
   8837 		 * failed mapping.
   8838 		 */
   8839 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8840 			/*
   8841 			 * Failed, throw away what we've done so
   8842 			 * far, and discard the rest of the packet.
   8843 			 */
   8844 			ifp->if_ierrors++;
   8845 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8846 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8847 			wm_init_rxdesc(rxq, i);
   8848 			if (!wm_rxdesc_is_eop(rxq, status))
   8849 				rxq->rxq_discard = 1;
   8850 			if (rxq->rxq_head != NULL)
   8851 				m_freem(rxq->rxq_head);
   8852 			WM_RXCHAIN_RESET(rxq);
   8853 			DPRINTF(WM_DEBUG_RX,
   8854 			    ("%s: RX: Rx buffer allocation failed, "
   8855 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8856 				rxq->rxq_discard ? " (discard)" : ""));
   8857 			continue;
   8858 		}
   8859 
   8860 		m->m_len = len;
   8861 		rxq->rxq_len += len;
   8862 		DPRINTF(WM_DEBUG_RX,
   8863 		    ("%s: RX: buffer at %p len %d\n",
   8864 			device_xname(sc->sc_dev), m->m_data, len));
   8865 
   8866 		/* If this is not the end of the packet, keep looking. */
   8867 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8868 			WM_RXCHAIN_LINK(rxq, m);
   8869 			DPRINTF(WM_DEBUG_RX,
   8870 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8871 				device_xname(sc->sc_dev), rxq->rxq_len));
   8872 			continue;
   8873 		}
   8874 
   8875 		/*
    8876 		 * Okay, we have the entire packet now. The chip is
    8877 		 * configured to include the FCS except on I350, I354 and
    8878 		 * I21[01] (not all chips can be configured to strip it),
    8879 		 * so we need to trim it. We may also need to adjust the
    8880 		 * length of the previous mbuf in the chain if the current
    8881 		 * mbuf is too short.
    8882 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8883 		 * is always set on I350, so we don't trim the FCS there.
   8884 		 */
   8885 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8886 		    && (sc->sc_type != WM_T_I210)
   8887 		    && (sc->sc_type != WM_T_I211)) {
   8888 			if (m->m_len < ETHER_CRC_LEN) {
   8889 				rxq->rxq_tail->m_len
   8890 				    -= (ETHER_CRC_LEN - m->m_len);
   8891 				m->m_len = 0;
   8892 			} else
   8893 				m->m_len -= ETHER_CRC_LEN;
   8894 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8895 		} else
   8896 			len = rxq->rxq_len;
   8897 
   8898 		WM_RXCHAIN_LINK(rxq, m);
   8899 
   8900 		*rxq->rxq_tailp = NULL;
   8901 		m = rxq->rxq_head;
   8902 
   8903 		WM_RXCHAIN_RESET(rxq);
   8904 
   8905 		DPRINTF(WM_DEBUG_RX,
   8906 		    ("%s: RX: have entire packet, len -> %d\n",
   8907 			device_xname(sc->sc_dev), len));
   8908 
   8909 		/* If an error occurred, update stats and drop the packet. */
   8910 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8911 			m_freem(m);
   8912 			continue;
   8913 		}
   8914 
   8915 		/* No errors.  Receive the packet. */
   8916 		m_set_rcvif(m, ifp);
   8917 		m->m_pkthdr.len = len;
   8918 		/*
   8919 		 * TODO
    8920 		 * The rsshash and rsstype should be saved in this mbuf.
   8921 		 */
   8922 		DPRINTF(WM_DEBUG_RX,
   8923 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8924 			device_xname(sc->sc_dev), rsstype, rsshash));
   8925 
   8926 		/*
   8927 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8928 		 * for us.  Associate the tag with the packet.
   8929 		 */
   8930 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8931 			continue;
   8932 
   8933 		/* Set up checksum info for this packet. */
   8934 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8935 		/*
    8936 		 * Update the receive pointer while holding rxq_lock so it
    8937 		 * stays consistent with the counters.
   8938 		 */
   8939 		rxq->rxq_ptr = i;
   8940 		rxq->rxq_packets++;
   8941 		rxq->rxq_bytes += len;
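         		/* Release rxq_lock while the packet is passed to the stack. */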
   8942 		mutex_exit(rxq->rxq_lock);
   8943 
   8944 		/* Pass it on. */
   8945 		if_percpuq_enqueue(sc->sc_ipq, m);
   8946 
   8947 		mutex_enter(rxq->rxq_lock);
   8948 
   8949 		if (rxq->rxq_stopping)
   8950 			break;
   8951 	}
   8952 
   8953 	if (count != 0)
   8954 		rnd_add_uint32(&sc->rnd_source, count);
   8955 
   8956 	DPRINTF(WM_DEBUG_RX,
   8957 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8958 
   8959 	return more;
   8960 }
   8961 
   8962 /*
   8963  * wm_linkintr_gmii:
   8964  *
   8965  *	Helper; handle link interrupts for GMII.
   8966  */
   8967 static void
   8968 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8969 {
   8970 	device_t dev = sc->sc_dev;
   8971 	uint32_t status, reg;
   8972 	bool link;
   8973 	int rv;
   8974 
   8975 	KASSERT(WM_CORE_LOCKED(sc));
   8976 
   8977 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8978 		__func__));
   8979 
   8980 	if ((icr & ICR_LSC) == 0) {
   8981 		if (icr & ICR_RXSEQ)
   8982 			DPRINTF(WM_DEBUG_LINK,
   8983 			    ("%s: LINK Receive sequence error\n",
   8984 				device_xname(dev)));
   8985 		return;
   8986 	}
   8987 
   8988 	/* Link status changed */
   8989 	status = CSR_READ(sc, WMREG_STATUS);
   8990 	link = status & STATUS_LU;
   8991 	if (link)
   8992 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8993 			device_xname(dev),
   8994 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8995 	else
   8996 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8997 			device_xname(dev)));
   8998 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8999 		wm_gig_downshift_workaround_ich8lan(sc);
   9000 
   9001 	if ((sc->sc_type == WM_T_ICH8)
   9002 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9003 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9004 	}
   9005 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9006 		device_xname(dev)));
   9007 	mii_pollstat(&sc->sc_mii);
   9008 	if (sc->sc_type == WM_T_82543) {
   9009 		int miistatus, active;
   9010 
   9011 		/*
   9012 		 * With 82543, we need to force speed and
   9013 		 * duplex on the MAC equal to what the PHY
   9014 		 * speed and duplex configuration is.
   9015 		 */
   9016 		miistatus = sc->sc_mii.mii_media_status;
   9017 
   9018 		if (miistatus & IFM_ACTIVE) {
   9019 			active = sc->sc_mii.mii_media_active;
   9020 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9021 			switch (IFM_SUBTYPE(active)) {
   9022 			case IFM_10_T:
   9023 				sc->sc_ctrl |= CTRL_SPEED_10;
   9024 				break;
   9025 			case IFM_100_TX:
   9026 				sc->sc_ctrl |= CTRL_SPEED_100;
   9027 				break;
   9028 			case IFM_1000_T:
   9029 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9030 				break;
   9031 			default:
   9032 				/*
    9033 				 * Fiber?
    9034 				 * Should not enter here.
   9035 				 */
   9036 				printf("unknown media (%x)\n", active);
   9037 				break;
   9038 			}
   9039 			if (active & IFM_FDX)
   9040 				sc->sc_ctrl |= CTRL_FD;
   9041 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9042 		}
   9043 	} else if (sc->sc_type == WM_T_PCH) {
   9044 		wm_k1_gig_workaround_hv(sc,
   9045 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9046 	}
   9047 
   9048 	/*
   9049 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9050 	 * aggressive resulting in many collisions. To avoid this, increase
   9051 	 * the IPG and reduce Rx latency in the PHY.
   9052 	 */
   9053 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9054 	    && link) {
   9055 		uint32_t tipg_reg;
   9056 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9057 		bool fdx;
   9058 		uint16_t emi_addr, emi_val;
   9059 
   9060 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9061 		tipg_reg &= ~TIPG_IPGT_MASK;
   9062 		fdx = status & STATUS_FD;
   9063 
   9064 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9065 			tipg_reg |= 0xff;
   9066 			/* Reduce Rx latency in analog PHY */
   9067 			emi_val = 0;
   9068 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9069 		    fdx && speed != STATUS_SPEED_1000) {
   9070 			tipg_reg |= 0xc;
   9071 			emi_val = 1;
   9072 		} else {
   9073 			/* Roll back the default values */
   9074 			tipg_reg |= 0x08;
   9075 			emi_val = 1;
   9076 		}
   9077 
   9078 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9079 
   9080 		rv = sc->phy.acquire(sc);
   9081 		if (rv)
   9082 			return;
   9083 
   9084 		if (sc->sc_type == WM_T_PCH2)
   9085 			emi_addr = I82579_RX_CONFIG;
   9086 		else
   9087 			emi_addr = I217_RX_CONFIG;
   9088 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9089 
   9090 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9091 			uint16_t phy_reg;
   9092 
   9093 			sc->phy.readreg_locked(dev, 2,
   9094 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9095 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9096 			if (speed == STATUS_SPEED_100
   9097 			    || speed == STATUS_SPEED_10)
   9098 				phy_reg |= 0x3e8;
   9099 			else
   9100 				phy_reg |= 0xfa;
   9101 			sc->phy.writereg_locked(dev, 2,
   9102 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9103 
   9104 			if (speed == STATUS_SPEED_1000) {
   9105 				sc->phy.readreg_locked(dev, 2,
   9106 				    HV_PM_CTRL, &phy_reg);
   9107 
   9108 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9109 
   9110 				sc->phy.writereg_locked(dev, 2,
   9111 				    HV_PM_CTRL, phy_reg);
   9112 			}
   9113 		}
   9114 		sc->phy.release(sc);
   9115 
   9116 		if (rv)
   9117 			return;
   9118 
   9119 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9120 			uint16_t data, ptr_gap;
   9121 
   9122 			if (speed == STATUS_SPEED_1000) {
   9123 				rv = sc->phy.acquire(sc);
   9124 				if (rv)
   9125 					return;
   9126 
   9127 				rv = sc->phy.readreg_locked(dev, 2,
   9128 				    I219_UNKNOWN1, &data);
   9129 				if (rv) {
   9130 					sc->phy.release(sc);
   9131 					return;
   9132 				}
   9133 
   9134 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9135 				if (ptr_gap < 0x18) {
   9136 					data &= ~(0x3ff << 2);
   9137 					data |= (0x18 << 2);
   9138 					rv = sc->phy.writereg_locked(dev,
   9139 					    2, I219_UNKNOWN1, data);
   9140 				}
   9141 				sc->phy.release(sc);
   9142 				if (rv)
   9143 					return;
   9144 			} else {
   9145 				rv = sc->phy.acquire(sc);
   9146 				if (rv)
   9147 					return;
   9148 
   9149 				rv = sc->phy.writereg_locked(dev, 2,
   9150 				    I219_UNKNOWN1, 0xc023);
   9151 				sc->phy.release(sc);
   9152 				if (rv)
   9153 					return;
   9154 
   9155 			}
   9156 		}
   9157 	}
   9158 
   9159 	/*
   9160 	 * I217 Packet Loss issue:
   9161 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9162 	 * on power up.
   9163 	 * Set the Beacon Duration for I217 to 8 usec
   9164 	 */
   9165 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9166 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9167 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9168 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9169 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9170 	}
   9171 
   9172 	/* Work-around I218 hang issue */
   9173 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9174 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9175 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9176 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9177 		wm_k1_workaround_lpt_lp(sc, link);
   9178 
   9179 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9180 		/*
   9181 		 * Set platform power management values for Latency
   9182 		 * Tolerance Reporting (LTR)
   9183 		 */
   9184 		wm_platform_pm_pch_lpt(sc,
   9185 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9186 	}
   9187 
   9188 	/* Clear link partner's EEE ability */
   9189 	sc->eee_lp_ability = 0;
   9190 
   9191 	/* FEXTNVM6 K1-off workaround */
   9192 	if (sc->sc_type == WM_T_PCH_SPT) {
   9193 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9194 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9195 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9196 		else
   9197 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9198 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9199 	}
   9200 
   9201 	if (!link)
   9202 		return;
   9203 
   9204 	switch (sc->sc_type) {
   9205 	case WM_T_PCH2:
   9206 		wm_k1_workaround_lv(sc);
   9207 		/* FALLTHROUGH */
   9208 	case WM_T_PCH:
   9209 		if (sc->sc_phytype == WMPHY_82578)
   9210 			wm_link_stall_workaround_hv(sc);
   9211 		break;
   9212 	default:
   9213 		break;
   9214 	}
   9215 
   9216 	/* Enable/Disable EEE after link up */
   9217 	if (sc->sc_phytype > WMPHY_82579)
   9218 		wm_set_eee_pchlan(sc);
   9219 }
   9220 
   9221 /*
   9222  * wm_linkintr_tbi:
   9223  *
   9224  *	Helper; handle link interrupts for TBI mode.
   9225  */
   9226 static void
   9227 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9228 {
   9229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9230 	uint32_t status;
   9231 
   9232 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9233 		__func__));
   9234 
   9235 	status = CSR_READ(sc, WMREG_STATUS);
   9236 	if (icr & ICR_LSC) {
   9237 		wm_check_for_link(sc);
   9238 		if (status & STATUS_LU) {
   9239 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9240 				device_xname(sc->sc_dev),
   9241 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9242 			/*
   9243 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9244 			 * so we should update sc->sc_ctrl
   9245 			 */
   9246 
   9247 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9248 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9249 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9250 			if (status & STATUS_FD)
   9251 				sc->sc_tctl |=
   9252 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9253 			else
   9254 				sc->sc_tctl |=
   9255 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9256 			if (sc->sc_ctrl & CTRL_TFCE)
   9257 				sc->sc_fcrtl |= FCRTL_XONE;
   9258 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9259 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9260 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9261 			sc->sc_tbi_linkup = 1;
   9262 			if_link_state_change(ifp, LINK_STATE_UP);
   9263 		} else {
   9264 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9265 				device_xname(sc->sc_dev)));
   9266 			sc->sc_tbi_linkup = 0;
   9267 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9268 		}
   9269 		/* Update LED */
   9270 		wm_tbi_serdes_set_linkled(sc);
   9271 	} else if (icr & ICR_RXSEQ)
   9272 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9273 			device_xname(sc->sc_dev)));
   9274 }
   9275 
   9276 /*
   9277  * wm_linkintr_serdes:
   9278  *
    9279  *	Helper; handle link interrupts for SERDES mode.
   9280  */
   9281 static void
   9282 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9283 {
   9284 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9285 	struct mii_data *mii = &sc->sc_mii;
   9286 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9287 	uint32_t pcs_adv, pcs_lpab, reg;
   9288 
   9289 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9290 		__func__));
   9291 
   9292 	if (icr & ICR_LSC) {
   9293 		/* Check PCS */
   9294 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9295 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9296 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9297 				device_xname(sc->sc_dev)));
   9298 			mii->mii_media_status |= IFM_ACTIVE;
   9299 			sc->sc_tbi_linkup = 1;
   9300 			if_link_state_change(ifp, LINK_STATE_UP);
   9301 		} else {
   9302 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9303 				device_xname(sc->sc_dev)));
   9304 			mii->mii_media_status |= IFM_NONE;
   9305 			sc->sc_tbi_linkup = 0;
   9306 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9307 			wm_tbi_serdes_set_linkled(sc);
   9308 			return;
   9309 		}
   9310 		mii->mii_media_active |= IFM_1000_SX;
   9311 		if ((reg & PCS_LSTS_FDX) != 0)
   9312 			mii->mii_media_active |= IFM_FDX;
   9313 		else
   9314 			mii->mii_media_active |= IFM_HDX;
   9315 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9316 			/* Check flow */
   9317 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9318 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9319 				DPRINTF(WM_DEBUG_LINK,
   9320 				    ("XXX LINKOK but not ACOMP\n"));
   9321 				return;
   9322 			}
   9323 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9324 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9325 			DPRINTF(WM_DEBUG_LINK,
   9326 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9327 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9328 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9329 				mii->mii_media_active |= IFM_FLOW
   9330 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9331 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9332 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9333 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9334 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9335 				mii->mii_media_active |= IFM_FLOW
   9336 				    | IFM_ETH_TXPAUSE;
   9337 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9338 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9339 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9340 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9341 				mii->mii_media_active |= IFM_FLOW
   9342 				    | IFM_ETH_RXPAUSE;
   9343 		}
   9344 		/* Update LED */
   9345 		wm_tbi_serdes_set_linkled(sc);
   9346 	} else
   9347 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9348 		    device_xname(sc->sc_dev)));
   9349 }
   9350 
   9351 /*
   9352  * wm_linkintr:
   9353  *
   9354  *	Helper; handle link interrupts.
   9355  */
   9356 static void
   9357 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9358 {
   9359 
   9360 	KASSERT(WM_CORE_LOCKED(sc));
   9361 
   9362 	if (sc->sc_flags & WM_F_HAS_MII)
   9363 		wm_linkintr_gmii(sc, icr);
   9364 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9365 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9366 		wm_linkintr_serdes(sc, icr);
   9367 	else
   9368 		wm_linkintr_tbi(sc, icr);
   9369 }
   9370 
   9371 /*
   9372  * wm_intr_legacy:
   9373  *
   9374  *	Interrupt service routine for INTx and MSI.
   9375  */
   9376 static int
   9377 wm_intr_legacy(void *arg)
   9378 {
   9379 	struct wm_softc *sc = arg;
   9380 	struct wm_queue *wmq = &sc->sc_queue[0];
   9381 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9382 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9383 	uint32_t icr, rndval = 0;
   9384 	int handled = 0;
   9385 
   9386 	while (1 /* CONSTCOND */) {
   9387 		icr = CSR_READ(sc, WMREG_ICR);
   9388 		if ((icr & sc->sc_icr) == 0)
   9389 			break;
   9390 		if (handled == 0)
   9391 			DPRINTF(WM_DEBUG_TX,
    9392 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
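         		/* Remember the first non-zero ICR value for entropy. */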
   9393 		if (rndval == 0)
   9394 			rndval = icr;
   9395 
   9396 		mutex_enter(rxq->rxq_lock);
   9397 
   9398 		if (rxq->rxq_stopping) {
   9399 			mutex_exit(rxq->rxq_lock);
   9400 			break;
   9401 		}
   9402 
   9403 		handled = 1;
   9404 
   9405 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9406 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9407 			DPRINTF(WM_DEBUG_RX,
   9408 			    ("%s: RX: got Rx intr 0x%08x\n",
   9409 				device_xname(sc->sc_dev),
   9410 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9411 			WM_Q_EVCNT_INCR(rxq, intr);
   9412 		}
   9413 #endif
   9414 		/*
   9415 		 * wm_rxeof() does *not* call upper layer functions directly,
    9416 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9417 		 * So, we can call wm_rxeof() in interrupt context.
   9418 		 */
   9419 		wm_rxeof(rxq, UINT_MAX);
   9420 
   9421 		mutex_exit(rxq->rxq_lock);
   9422 		mutex_enter(txq->txq_lock);
   9423 
   9424 		if (txq->txq_stopping) {
   9425 			mutex_exit(txq->txq_lock);
   9426 			break;
   9427 		}
   9428 
   9429 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9430 		if (icr & ICR_TXDW) {
   9431 			DPRINTF(WM_DEBUG_TX,
   9432 			    ("%s: TX: got TXDW interrupt\n",
   9433 				device_xname(sc->sc_dev)));
   9434 			WM_Q_EVCNT_INCR(txq, txdw);
   9435 		}
   9436 #endif
   9437 		wm_txeof(txq, UINT_MAX);
   9438 
   9439 		mutex_exit(txq->txq_lock);
   9440 		WM_CORE_LOCK(sc);
   9441 
   9442 		if (sc->sc_core_stopping) {
   9443 			WM_CORE_UNLOCK(sc);
   9444 			break;
   9445 		}
   9446 
   9447 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9448 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9449 			wm_linkintr(sc, icr);
   9450 		}
   9451 
   9452 		WM_CORE_UNLOCK(sc);
   9453 
   9454 		if (icr & ICR_RXO) {
   9455 #if defined(WM_DEBUG)
   9456 			log(LOG_WARNING, "%s: Receive overrun\n",
   9457 			    device_xname(sc->sc_dev));
   9458 #endif /* defined(WM_DEBUG) */
   9459 		}
   9460 	}
   9461 
   9462 	rnd_add_uint32(&sc->rnd_source, rndval);
   9463 
   9464 	if (handled) {
   9465 		/* Try to get more packets going. */
   9466 		softint_schedule(wmq->wmq_si);
   9467 	}
   9468 
   9469 	return handled;
   9470 }
   9471 
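         /*
          * Per-queue interrupt masking: the 82574 uses IMS/IMC with per-queue
          * ICR bits, the 82575 uses EIMS/EIMC with EITR queue bits, and newer
          * chips use EIMS/EIMC with one bit per MSI-X vector.
          */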
   9472 static inline void
   9473 wm_txrxintr_disable(struct wm_queue *wmq)
   9474 {
   9475 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9476 
   9477 	if (sc->sc_type == WM_T_82574)
   9478 		CSR_WRITE(sc, WMREG_IMC,
   9479 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9480 	else if (sc->sc_type == WM_T_82575)
   9481 		CSR_WRITE(sc, WMREG_EIMC,
   9482 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9483 	else
   9484 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9485 }
   9486 
   9487 static inline void
   9488 wm_txrxintr_enable(struct wm_queue *wmq)
   9489 {
   9490 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9491 
   9492 	wm_itrs_calculate(sc, wmq);
   9493 
   9494 	/*
    9495 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9496 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9497 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9498 	 * while its wm_handle_queue(wmq) is running.
   9499 	 */
   9500 	if (sc->sc_type == WM_T_82574)
   9501 		CSR_WRITE(sc, WMREG_IMS,
   9502 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9503 	else if (sc->sc_type == WM_T_82575)
   9504 		CSR_WRITE(sc, WMREG_EIMS,
   9505 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9506 	else
   9507 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9508 }
   9509 
   9510 static int
   9511 wm_txrxintr_msix(void *arg)
   9512 {
   9513 	struct wm_queue *wmq = arg;
   9514 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9515 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9516 	struct wm_softc *sc = txq->txq_sc;
   9517 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9518 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9519 	bool txmore;
   9520 	bool rxmore;
   9521 
   9522 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9523 
   9524 	DPRINTF(WM_DEBUG_TX,
   9525 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9526 
   9527 	wm_txrxintr_disable(wmq);
   9528 
   9529 	mutex_enter(txq->txq_lock);
   9530 
   9531 	if (txq->txq_stopping) {
   9532 		mutex_exit(txq->txq_lock);
   9533 		return 0;
   9534 	}
   9535 
   9536 	WM_Q_EVCNT_INCR(txq, txdw);
   9537 	txmore = wm_txeof(txq, txlimit);
	/* Deferred start is done in wm_handle_queue(). */
   9539 	mutex_exit(txq->txq_lock);
   9540 
   9541 	DPRINTF(WM_DEBUG_RX,
   9542 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9543 	mutex_enter(rxq->rxq_lock);
   9544 
   9545 	if (rxq->rxq_stopping) {
   9546 		mutex_exit(rxq->rxq_lock);
   9547 		return 0;
   9548 	}
   9549 
   9550 	WM_Q_EVCNT_INCR(rxq, intr);
   9551 	rxmore = wm_rxeof(rxq, rxlimit);
   9552 	mutex_exit(rxq->rxq_lock);
   9553 
   9554 	wm_itrs_writereg(sc, wmq);
   9555 
   9556 	if (txmore || rxmore)
   9557 		softint_schedule(wmq->wmq_si);
   9558 	else
   9559 		wm_txrxintr_enable(wmq);
   9560 
   9561 	return 1;
   9562 }
   9563 
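/*
 * wm_handle_queue:
 *
 *	Softint handler that continues the Tx/Rx processing deferred from
 *	wm_txrxintr_msix() under the larger wm_{tx,rx}_process_limit,
 *	rescheduling itself while more work remains and re-enabling the
 *	queue interrupt once it is done.
 */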
   9564 static void
   9565 wm_handle_queue(void *arg)
   9566 {
   9567 	struct wm_queue *wmq = arg;
   9568 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9569 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9570 	struct wm_softc *sc = txq->txq_sc;
   9571 	u_int txlimit = sc->sc_tx_process_limit;
   9572 	u_int rxlimit = sc->sc_rx_process_limit;
   9573 	bool txmore;
   9574 	bool rxmore;
   9575 
   9576 	mutex_enter(txq->txq_lock);
   9577 	if (txq->txq_stopping) {
   9578 		mutex_exit(txq->txq_lock);
   9579 		return;
   9580 	}
   9581 	txmore = wm_txeof(txq, txlimit);
   9582 	wm_deferred_start_locked(txq);
   9583 	mutex_exit(txq->txq_lock);
   9584 
   9585 	mutex_enter(rxq->rxq_lock);
   9586 	if (rxq->rxq_stopping) {
   9587 		mutex_exit(rxq->rxq_lock);
   9588 		return;
   9589 	}
   9590 	WM_Q_EVCNT_INCR(rxq, defer);
   9591 	rxmore = wm_rxeof(rxq, rxlimit);
   9592 	mutex_exit(rxq->rxq_lock);
   9593 
   9594 	if (txmore || rxmore)
   9595 		softint_schedule(wmq->wmq_si);
   9596 	else
   9597 		wm_txrxintr_enable(wmq);
   9598 }
   9599 
   9600 /*
   9601  * wm_linkintr_msix:
   9602  *
   9603  *	Interrupt service routine for link status change for MSI-X.
   9604  */
   9605 static int
   9606 wm_linkintr_msix(void *arg)
   9607 {
   9608 	struct wm_softc *sc = arg;
   9609 	uint32_t reg;
   9610 	bool has_rxo;
   9611 
   9612 	DPRINTF(WM_DEBUG_LINK,
   9613 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9614 
   9615 	reg = CSR_READ(sc, WMREG_ICR);
   9616 	WM_CORE_LOCK(sc);
   9617 	if (sc->sc_core_stopping)
   9618 		goto out;
   9619 
   9620 	if ((reg & ICR_LSC) != 0) {
   9621 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9622 		wm_linkintr(sc, ICR_LSC);
   9623 	}
   9624 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector; it raises neither the ICR_RXQ(0) nor the ICR_RXQ(1)
	 * vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by
	 * writing WMREG_ICS, in order to process the received packets.
	 */
   9633 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9634 #if defined(WM_DEBUG)
   9635 		log(LOG_WARNING, "%s: Receive overrun\n",
   9636 		    device_xname(sc->sc_dev));
   9637 #endif /* defined(WM_DEBUG) */
   9638 
   9639 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so poll for ICR_OTHER just as for the
		 * Tx/Rx interrupts.  ICR_OTHER is re-enabled at the end of
		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
		 * and ICR_RXQ(1) interrupts.
		 */
   9647 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9648 
   9649 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9650 	}
   9651 
   9654 out:
   9655 	WM_CORE_UNLOCK(sc);
   9656 
   9657 	if (sc->sc_type == WM_T_82574) {
   9658 		if (!has_rxo)
   9659 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9660 		else
   9661 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9662 	} else if (sc->sc_type == WM_T_82575)
   9663 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9664 	else
   9665 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9666 
   9667 	return 1;
   9668 }
   9669 
   9670 /*
   9671  * Media related.
   9672  * GMII, SGMII, TBI (and SERDES)
   9673  */
   9674 
   9675 /* Common */
   9676 
   9677 /*
   9678  * wm_tbi_serdes_set_linkled:
   9679  *
   9680  *	Update the link LED on TBI and SERDES devices.
   9681  */
   9682 static void
   9683 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9684 {
   9685 
   9686 	if (sc->sc_tbi_linkup)
   9687 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9688 	else
   9689 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9690 
   9691 	/* 82540 or newer devices are active low */
   9692 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9693 
   9694 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9695 }
   9696 
   9697 /* GMII related */
   9698 
   9699 /*
   9700  * wm_gmii_reset:
   9701  *
   9702  *	Reset the PHY.
   9703  */
   9704 static void
   9705 wm_gmii_reset(struct wm_softc *sc)
   9706 {
   9707 	uint32_t reg;
   9708 	int rv;
   9709 
   9710 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9711 		device_xname(sc->sc_dev), __func__));
   9712 
   9713 	rv = sc->phy.acquire(sc);
   9714 	if (rv != 0) {
   9715 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9716 		    __func__);
   9717 		return;
   9718 	}
   9719 
   9720 	switch (sc->sc_type) {
   9721 	case WM_T_82542_2_0:
   9722 	case WM_T_82542_2_1:
   9723 		/* null */
   9724 		break;
   9725 	case WM_T_82543:
		/*
		 * With 82543, we need to force the MAC's speed and duplex
		 * to match the PHY's speed and duplex configuration.  In
		 * addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
   9732 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9733 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9734 
   9735 		/* The PHY reset pin is active-low. */
   9736 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9737 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9738 		    CTRL_EXT_SWDPIN(4));
   9739 		reg |= CTRL_EXT_SWDPIO(4);
   9740 
   9741 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9742 		CSR_WRITE_FLUSH(sc);
   9743 		delay(10*1000);
   9744 
   9745 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9746 		CSR_WRITE_FLUSH(sc);
   9747 		delay(150);
   9748 #if 0
   9749 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9750 #endif
   9751 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9752 		break;
   9753 	case WM_T_82544:	/* reset 10000us */
   9754 	case WM_T_82540:
   9755 	case WM_T_82545:
   9756 	case WM_T_82545_3:
   9757 	case WM_T_82546:
   9758 	case WM_T_82546_3:
   9759 	case WM_T_82541:
   9760 	case WM_T_82541_2:
   9761 	case WM_T_82547:
   9762 	case WM_T_82547_2:
   9763 	case WM_T_82571:	/* reset 100us */
   9764 	case WM_T_82572:
   9765 	case WM_T_82573:
   9766 	case WM_T_82574:
   9767 	case WM_T_82575:
   9768 	case WM_T_82576:
   9769 	case WM_T_82580:
   9770 	case WM_T_I350:
   9771 	case WM_T_I354:
   9772 	case WM_T_I210:
   9773 	case WM_T_I211:
   9774 	case WM_T_82583:
   9775 	case WM_T_80003:
   9776 		/* generic reset */
   9777 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9778 		CSR_WRITE_FLUSH(sc);
   9779 		delay(20000);
   9780 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9781 		CSR_WRITE_FLUSH(sc);
   9782 		delay(20000);
   9783 
   9784 		if ((sc->sc_type == WM_T_82541)
   9785 		    || (sc->sc_type == WM_T_82541_2)
   9786 		    || (sc->sc_type == WM_T_82547)
   9787 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9789 			/* XXX add code to set LED after phy reset */
   9790 		}
   9791 		break;
   9792 	case WM_T_ICH8:
   9793 	case WM_T_ICH9:
   9794 	case WM_T_ICH10:
   9795 	case WM_T_PCH:
   9796 	case WM_T_PCH2:
   9797 	case WM_T_PCH_LPT:
   9798 	case WM_T_PCH_SPT:
   9799 	case WM_T_PCH_CNP:
   9800 		/* generic reset */
   9801 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9802 		CSR_WRITE_FLUSH(sc);
   9803 		delay(100);
   9804 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9805 		CSR_WRITE_FLUSH(sc);
   9806 		delay(150);
   9807 		break;
   9808 	default:
   9809 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9810 		    __func__);
   9811 		break;
   9812 	}
   9813 
   9814 	sc->phy.release(sc);
   9815 
   9816 	/* get_cfg_done */
   9817 	wm_get_cfg_done(sc);
   9818 
   9819 	/* extra setup */
   9820 	switch (sc->sc_type) {
   9821 	case WM_T_82542_2_0:
   9822 	case WM_T_82542_2_1:
   9823 	case WM_T_82543:
   9824 	case WM_T_82544:
   9825 	case WM_T_82540:
   9826 	case WM_T_82545:
   9827 	case WM_T_82545_3:
   9828 	case WM_T_82546:
   9829 	case WM_T_82546_3:
   9830 	case WM_T_82541_2:
   9831 	case WM_T_82547_2:
   9832 	case WM_T_82571:
   9833 	case WM_T_82572:
   9834 	case WM_T_82573:
   9835 	case WM_T_82574:
   9836 	case WM_T_82583:
   9837 	case WM_T_82575:
   9838 	case WM_T_82576:
   9839 	case WM_T_82580:
   9840 	case WM_T_I350:
   9841 	case WM_T_I354:
   9842 	case WM_T_I210:
   9843 	case WM_T_I211:
   9844 	case WM_T_80003:
   9845 		/* null */
   9846 		break;
   9847 	case WM_T_82541:
   9848 	case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
   9850 		break;
   9851 	case WM_T_ICH8:
   9852 	case WM_T_ICH9:
   9853 	case WM_T_ICH10:
   9854 	case WM_T_PCH:
   9855 	case WM_T_PCH2:
   9856 	case WM_T_PCH_LPT:
   9857 	case WM_T_PCH_SPT:
   9858 	case WM_T_PCH_CNP:
   9859 		wm_phy_post_reset(sc);
   9860 		break;
   9861 	default:
   9862 		panic("%s: unknown type\n", __func__);
   9863 		break;
   9864 	}
   9865 }
   9866 
/*
 * Setup sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write function must be
 * selected.  To select the correct read/write function, the PCI ID or
 * MAC type is required, without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type.  The list of PCI IDs may not be complete,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the PHY
 * type.  It might still not be perfect because of missing comparison
 * entries, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption, a
 * diagnostic message is printed.
 */
   9885 static void
   9886 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9887     uint16_t phy_model)
   9888 {
   9889 	device_t dev = sc->sc_dev;
   9890 	struct mii_data *mii = &sc->sc_mii;
   9891 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9892 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9893 	mii_readreg_t new_readreg;
   9894 	mii_writereg_t new_writereg;
   9895 
   9896 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9897 		device_xname(sc->sc_dev), __func__));
   9898 
   9899 	if (mii->mii_readreg == NULL) {
   9900 		/*
   9901 		 *  This is the first call of this function. For ICH and PCH
   9902 		 * variants, it's difficult to determine the PHY access method
   9903 		 * by sc_type, so use the PCI product ID for some devices.
   9904 		 */
   9905 
   9906 		switch (sc->sc_pcidevid) {
   9907 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9908 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9909 			/* 82577 */
   9910 			new_phytype = WMPHY_82577;
   9911 			break;
   9912 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9913 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9914 			/* 82578 */
   9915 			new_phytype = WMPHY_82578;
   9916 			break;
   9917 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9918 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9919 			/* 82579 */
   9920 			new_phytype = WMPHY_82579;
   9921 			break;
   9922 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9923 		case PCI_PRODUCT_INTEL_82801I_BM:
   9924 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9925 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9926 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9927 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9928 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9929 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9930 			/* ICH8, 9, 10 with 82567 */
   9931 			new_phytype = WMPHY_BM;
   9932 			break;
   9933 		default:
   9934 			break;
   9935 		}
   9936 	} else {
   9937 		/* It's not the first call. Use PHY OUI and model */
   9938 		switch (phy_oui) {
   9939 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9940 			switch (phy_model) {
   9941 			case 0x0004: /* XXX */
   9942 				new_phytype = WMPHY_82578;
   9943 				break;
   9944 			default:
   9945 				break;
   9946 			}
   9947 			break;
   9948 		case MII_OUI_xxMARVELL:
   9949 			switch (phy_model) {
   9950 			case MII_MODEL_xxMARVELL_I210:
   9951 				new_phytype = WMPHY_I210;
   9952 				break;
   9953 			case MII_MODEL_xxMARVELL_E1011:
   9954 			case MII_MODEL_xxMARVELL_E1000_3:
   9955 			case MII_MODEL_xxMARVELL_E1000_5:
   9956 			case MII_MODEL_xxMARVELL_E1112:
   9957 				new_phytype = WMPHY_M88;
   9958 				break;
   9959 			case MII_MODEL_xxMARVELL_E1149:
   9960 				new_phytype = WMPHY_BM;
   9961 				break;
   9962 			case MII_MODEL_xxMARVELL_E1111:
   9963 			case MII_MODEL_xxMARVELL_I347:
   9964 			case MII_MODEL_xxMARVELL_E1512:
   9965 			case MII_MODEL_xxMARVELL_E1340M:
   9966 			case MII_MODEL_xxMARVELL_E1543:
   9967 				new_phytype = WMPHY_M88;
   9968 				break;
   9969 			case MII_MODEL_xxMARVELL_I82563:
   9970 				new_phytype = WMPHY_GG82563;
   9971 				break;
   9972 			default:
   9973 				break;
   9974 			}
   9975 			break;
   9976 		case MII_OUI_INTEL:
   9977 			switch (phy_model) {
   9978 			case MII_MODEL_INTEL_I82577:
   9979 				new_phytype = WMPHY_82577;
   9980 				break;
   9981 			case MII_MODEL_INTEL_I82579:
   9982 				new_phytype = WMPHY_82579;
   9983 				break;
   9984 			case MII_MODEL_INTEL_I217:
   9985 				new_phytype = WMPHY_I217;
   9986 				break;
   9987 			case MII_MODEL_INTEL_I82580:
   9988 			case MII_MODEL_INTEL_I350:
   9989 				new_phytype = WMPHY_82580;
   9990 				break;
   9991 			default:
   9992 				break;
   9993 			}
   9994 			break;
   9995 		case MII_OUI_yyINTEL:
   9996 			switch (phy_model) {
   9997 			case MII_MODEL_yyINTEL_I82562G:
   9998 			case MII_MODEL_yyINTEL_I82562EM:
   9999 			case MII_MODEL_yyINTEL_I82562ET:
   10000 				new_phytype = WMPHY_IFE;
   10001 				break;
   10002 			case MII_MODEL_yyINTEL_IGP01E1000:
   10003 				new_phytype = WMPHY_IGP;
   10004 				break;
   10005 			case MII_MODEL_yyINTEL_I82566:
   10006 				new_phytype = WMPHY_IGP_3;
   10007 				break;
   10008 			default:
   10009 				break;
   10010 			}
   10011 			break;
   10012 		default:
   10013 			break;
   10014 		}
   10015 		if (new_phytype == WMPHY_UNKNOWN)
   10016 			aprint_verbose_dev(dev,
   10017 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10018 			    __func__, phy_oui, phy_model);
   10019 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
			    "was incorrect. PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   10026 	}
   10027 
   10028 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10029 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10030 		/* SGMII */
   10031 		new_readreg = wm_sgmii_readreg;
   10032 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10034 		/* BM2 (phyaddr == 1) */
   10035 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10036 		    && (new_phytype != WMPHY_BM)
   10037 		    && (new_phytype != WMPHY_UNKNOWN))
   10038 			doubt_phytype = new_phytype;
   10039 		new_phytype = WMPHY_BM;
   10040 		new_readreg = wm_gmii_bm_readreg;
   10041 		new_writereg = wm_gmii_bm_writereg;
   10042 	} else if (sc->sc_type >= WM_T_PCH) {
   10043 		/* All PCH* use _hv_ */
   10044 		new_readreg = wm_gmii_hv_readreg;
   10045 		new_writereg = wm_gmii_hv_writereg;
   10046 	} else if (sc->sc_type >= WM_T_ICH8) {
   10047 		/* non-82567 ICH8, 9 and 10 */
   10048 		new_readreg = wm_gmii_i82544_readreg;
   10049 		new_writereg = wm_gmii_i82544_writereg;
   10050 	} else if (sc->sc_type >= WM_T_80003) {
   10051 		/* 80003 */
   10052 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10053 		    && (new_phytype != WMPHY_GG82563)
   10054 		    && (new_phytype != WMPHY_UNKNOWN))
   10055 			doubt_phytype = new_phytype;
   10056 		new_phytype = WMPHY_GG82563;
   10057 		new_readreg = wm_gmii_i80003_readreg;
   10058 		new_writereg = wm_gmii_i80003_writereg;
   10059 	} else if (sc->sc_type >= WM_T_I210) {
   10060 		/* I210 and I211 */
   10061 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10062 		    && (new_phytype != WMPHY_I210)
   10063 		    && (new_phytype != WMPHY_UNKNOWN))
   10064 			doubt_phytype = new_phytype;
   10065 		new_phytype = WMPHY_I210;
   10066 		new_readreg = wm_gmii_gs40g_readreg;
   10067 		new_writereg = wm_gmii_gs40g_writereg;
   10068 	} else if (sc->sc_type >= WM_T_82580) {
   10069 		/* 82580, I350 and I354 */
   10070 		new_readreg = wm_gmii_82580_readreg;
   10071 		new_writereg = wm_gmii_82580_writereg;
   10072 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10074 		new_readreg = wm_gmii_i82544_readreg;
   10075 		new_writereg = wm_gmii_i82544_writereg;
   10076 	} else {
   10077 		new_readreg = wm_gmii_i82543_readreg;
   10078 		new_writereg = wm_gmii_i82543_writereg;
   10079 	}
   10080 
   10081 	if (new_phytype == WMPHY_BM) {
   10082 		/* All BM use _bm_ */
   10083 		new_readreg = wm_gmii_bm_readreg;
   10084 		new_writereg = wm_gmii_bm_writereg;
   10085 	}
   10086 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10087 		/* All PCH* use _hv_ */
   10088 		new_readreg = wm_gmii_hv_readreg;
   10089 		new_writereg = wm_gmii_hv_writereg;
   10090 	}
   10091 
   10092 	/* Diag output */
   10093 	if (doubt_phytype != WMPHY_UNKNOWN)
   10094 		aprint_error_dev(dev, "Assumed new PHY type was "
   10095 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10096 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
		    "was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   10102 
   10103 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10104 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10105 
   10106 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10107 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10108 		    "function was incorrect.\n");
   10109 
   10110 	/* Update now */
   10111 	sc->sc_phytype = new_phytype;
   10112 	mii->mii_readreg = new_readreg;
   10113 	mii->mii_writereg = new_writereg;
   10114 	if (new_readreg == wm_gmii_hv_readreg) {
   10115 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10116 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10117 	} else if (new_readreg == wm_sgmii_readreg) {
   10118 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10119 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10120 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10121 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10122 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10123 	}
   10124 }
   10125 
   10126 /*
   10127  * wm_get_phy_id_82575:
   10128  *
 * Return the PHY ID, or -1 on failure.
   10130  */
   10131 static int
   10132 wm_get_phy_id_82575(struct wm_softc *sc)
   10133 {
   10134 	uint32_t reg;
   10135 	int phyid = -1;
   10136 
   10137 	/* XXX */
   10138 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10139 		return -1;
   10140 
   10141 	if (wm_sgmii_uses_mdio(sc)) {
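		/*
		 * The external PHY address is latched in MDIC on 82575/82576
		 * and in MDICNFG on 82580 and later.
		 */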
   10142 		switch (sc->sc_type) {
   10143 		case WM_T_82575:
   10144 		case WM_T_82576:
   10145 			reg = CSR_READ(sc, WMREG_MDIC);
   10146 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10147 			break;
   10148 		case WM_T_82580:
   10149 		case WM_T_I350:
   10150 		case WM_T_I354:
   10151 		case WM_T_I210:
   10152 		case WM_T_I211:
   10153 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10154 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10155 			break;
   10156 		default:
   10157 			return -1;
   10158 		}
   10159 	}
   10160 
   10161 	return phyid;
   10162 }
   10163 
   10165 /*
   10166  * wm_gmii_mediainit:
   10167  *
   10168  *	Initialize media for use on 1000BASE-T devices.
   10169  */
   10170 static void
   10171 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10172 {
   10173 	device_t dev = sc->sc_dev;
   10174 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10175 	struct mii_data *mii = &sc->sc_mii;
   10176 	uint32_t reg;
   10177 
   10178 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10179 		device_xname(sc->sc_dev), __func__));
   10180 
   10181 	/* We have GMII. */
   10182 	sc->sc_flags |= WM_F_HAS_MII;
   10183 
   10184 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10186 	else
   10187 		sc->sc_tipg = TIPG_1000T_DFLT;
   10188 
   10189 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10190 	if ((sc->sc_type == WM_T_82580)
   10191 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10192 	    || (sc->sc_type == WM_T_I211)) {
   10193 		reg = CSR_READ(sc, WMREG_PHPM);
   10194 		reg &= ~PHPM_GO_LINK_D;
   10195 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10196 	}
   10197 
   10198 	/*
   10199 	 * Let the chip set speed/duplex on its own based on
   10200 	 * signals from the PHY.
   10201 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10202 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10203 	 */
   10204 	sc->sc_ctrl |= CTRL_SLU;
   10205 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10206 
   10207 	/* Initialize our media structures and probe the GMII. */
   10208 	mii->mii_ifp = ifp;
   10209 
   10210 	mii->mii_statchg = wm_gmii_statchg;
   10211 
	/* Move PHY control from SMBus to PCIe */
   10213 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10214 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10215 	    || (sc->sc_type == WM_T_PCH_CNP))
   10216 		wm_init_phy_workarounds_pchlan(sc);
   10217 
   10218 	wm_gmii_reset(sc);
   10219 
   10220 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10221 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10222 	    wm_gmii_mediastatus);
   10223 
   10224 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10225 	    || (sc->sc_type == WM_T_82580)
   10226 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10227 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10228 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10229 			/* Attach only one port */
   10230 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10231 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10232 		} else {
   10233 			int i, id;
   10234 			uint32_t ctrl_ext;
   10235 
   10236 			id = wm_get_phy_id_82575(sc);
   10237 			if (id != -1) {
   10238 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10239 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10240 			}
   10241 			if ((id == -1)
   10242 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   10244 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10245 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10246 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10247 				CSR_WRITE_FLUSH(sc);
   10248 				delay(300*1000); /* XXX too long */
   10249 
				/* Try PHY addresses 1 through 7 */
   10251 				for (i = 1; i < 8; i++)
   10252 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10253 					    0xffffffff, i, MII_OFFSET_ANY,
   10254 					    MIIF_DOPAUSE);
   10255 
				/* Restore the previous SFP cage power state */
   10257 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10258 			}
   10259 		}
   10260 	} else
   10261 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10262 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10263 
	/*
	 * If the MAC is PCH2 or newer and it failed to detect an MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
	 */
   10268 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10269 		|| (sc->sc_type == WM_T_PCH_SPT)
   10270 		|| (sc->sc_type == WM_T_PCH_CNP))
   10271 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10272 		wm_set_mdio_slow_mode_hv(sc);
   10273 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10274 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10275 	}
   10276 
   10277 	/*
   10278 	 * (For ICH8 variants)
   10279 	 * If PHY detection failed, use BM's r/w function and retry.
   10280 	 */
   10281 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10282 		/* if failed, retry with *_bm_* */
   10283 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10284 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10285 		    sc->sc_phytype);
   10286 		sc->sc_phytype = WMPHY_BM;
   10287 		mii->mii_readreg = wm_gmii_bm_readreg;
   10288 		mii->mii_writereg = wm_gmii_bm_writereg;
   10289 
   10290 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10291 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10292 	}
   10293 
   10294 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10296 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10297 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10298 		sc->sc_phytype = WMPHY_NONE;
   10299 	} else {
   10300 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10301 
		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype().
		 */
   10306 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10307 		    child->mii_mpd_model);
   10308 
   10309 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10310 	}
   10311 }
   10312 
   10313 /*
   10314  * wm_gmii_mediachange:	[ifmedia interface function]
   10315  *
   10316  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10317  */
   10318 static int
   10319 wm_gmii_mediachange(struct ifnet *ifp)
   10320 {
   10321 	struct wm_softc *sc = ifp->if_softc;
   10322 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10323 	int rc;
   10324 
   10325 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10326 		device_xname(sc->sc_dev), __func__));
   10327 	if ((ifp->if_flags & IFF_UP) == 0)
   10328 		return 0;
   10329 
   10330 	/* Disable D0 LPLU. */
   10331 	wm_lplu_d0_disable(sc);
   10332 
   10333 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10334 	sc->sc_ctrl |= CTRL_SLU;
   10335 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10336 	    || (sc->sc_type > WM_T_82543)) {
   10337 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10338 	} else {
   10339 		sc->sc_ctrl &= ~CTRL_ASDE;
   10340 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10341 		if (ife->ifm_media & IFM_FDX)
   10342 			sc->sc_ctrl |= CTRL_FD;
   10343 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10344 		case IFM_10_T:
   10345 			sc->sc_ctrl |= CTRL_SPEED_10;
   10346 			break;
   10347 		case IFM_100_TX:
   10348 			sc->sc_ctrl |= CTRL_SPEED_100;
   10349 			break;
   10350 		case IFM_1000_T:
   10351 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10352 			break;
   10353 		case IFM_NONE:
   10354 			/* There is no specific setting for IFM_NONE */
   10355 			break;
   10356 		default:
   10357 			panic("wm_gmii_mediachange: bad media 0x%x",
   10358 			    ife->ifm_media);
   10359 		}
   10360 	}
   10361 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10362 	CSR_WRITE_FLUSH(sc);
   10363 	if (sc->sc_type <= WM_T_82543)
   10364 		wm_gmii_reset(sc);
   10365 
   10366 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10367 		return 0;
   10368 	return rc;
   10369 }
   10370 
   10371 /*
   10372  * wm_gmii_mediastatus:	[ifmedia interface function]
   10373  *
   10374  *	Get the current interface media status on a 1000BASE-T device.
   10375  */
   10376 static void
   10377 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10378 {
   10379 	struct wm_softc *sc = ifp->if_softc;
   10380 
   10381 	ether_mediastatus(ifp, ifmr);
   10382 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10383 	    | sc->sc_flowflags;
   10384 }
   10385 
   10386 #define	MDI_IO		CTRL_SWDPIN(2)
   10387 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10388 #define	MDI_CLK		CTRL_SWDPIN(3)
   10389 
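/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang a value out the i82543 MDIO pins, MSB first: each bit is
 *	presented on MDI_IO and latched by pulsing MDI_CLK, with ~10us
 *	delays around each clock edge.
 */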
   10390 static void
   10391 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10392 {
   10393 	uint32_t i, v;
   10394 
   10395 	v = CSR_READ(sc, WMREG_CTRL);
   10396 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10397 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10398 
   10399 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10400 		if (data & i)
   10401 			v |= MDI_IO;
   10402 		else
   10403 			v &= ~MDI_IO;
   10404 		CSR_WRITE(sc, WMREG_CTRL, v);
   10405 		CSR_WRITE_FLUSH(sc);
   10406 		delay(10);
   10407 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10408 		CSR_WRITE_FLUSH(sc);
   10409 		delay(10);
   10410 		CSR_WRITE(sc, WMREG_CTRL, v);
   10411 		CSR_WRITE_FLUSH(sc);
   10412 		delay(10);
   10413 	}
   10414 }
   10415 
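/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock a 16-bit value in from the i82543 MDIO pins, MSB first,
 *	with a turnaround clock pulse before and an idle pulse after the
 *	data bits.
 */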
   10416 static uint16_t
   10417 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10418 {
   10419 	uint32_t v, i;
   10420 	uint16_t data = 0;
   10421 
   10422 	v = CSR_READ(sc, WMREG_CTRL);
   10423 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10424 	v |= CTRL_SWDPIO(3);
   10425 
   10426 	CSR_WRITE(sc, WMREG_CTRL, v);
   10427 	CSR_WRITE_FLUSH(sc);
   10428 	delay(10);
   10429 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10430 	CSR_WRITE_FLUSH(sc);
   10431 	delay(10);
   10432 	CSR_WRITE(sc, WMREG_CTRL, v);
   10433 	CSR_WRITE_FLUSH(sc);
   10434 	delay(10);
   10435 
   10436 	for (i = 0; i < 16; i++) {
   10437 		data <<= 1;
   10438 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10439 		CSR_WRITE_FLUSH(sc);
   10440 		delay(10);
   10441 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10442 			data |= 1;
   10443 		CSR_WRITE(sc, WMREG_CTRL, v);
   10444 		CSR_WRITE_FLUSH(sc);
   10445 		delay(10);
   10446 	}
   10447 
   10448 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10449 	CSR_WRITE_FLUSH(sc);
   10450 	delay(10);
   10451 	CSR_WRITE(sc, WMREG_CTRL, v);
   10452 	CSR_WRITE_FLUSH(sc);
   10453 	delay(10);
   10454 
   10455 	return data;
   10456 }
   10457 
   10458 #undef MDI_IO
   10459 #undef MDI_DIR
   10460 #undef MDI_CLK
   10461 
   10462 /*
   10463  * wm_gmii_i82543_readreg:	[mii interface function]
   10464  *
   10465  *	Read a PHY register on the GMII (i82543 version).
   10466  */
   10467 static int
   10468 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10469 {
   10470 	struct wm_softc *sc = device_private(dev);
   10471 
   10472 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10473 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10474 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10475 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10476 
   10477 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10478 		device_xname(dev), phy, reg, *val));
   10479 
   10480 	return 0;
   10481 }
   10482 
   10483 /*
   10484  * wm_gmii_i82543_writereg:	[mii interface function]
   10485  *
   10486  *	Write a PHY register on the GMII (i82543 version).
   10487  */
   10488 static int
   10489 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10490 {
   10491 	struct wm_softc *sc = device_private(dev);
   10492 
   10493 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10494 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10495 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10496 	    (MII_COMMAND_START << 30), 32);
   10497 
   10498 	return 0;
   10499 }
   10500 
   10501 /*
   10502  * wm_gmii_mdic_readreg:	[mii interface function]
   10503  *
   10504  *	Read a PHY register on the GMII.
   10505  */
   10506 static int
   10507 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10508 {
   10509 	struct wm_softc *sc = device_private(dev);
   10510 	uint32_t mdic = 0;
   10511 	int i;
   10512 
   10513 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10514 	    && (reg > MII_ADDRMASK)) {
   10515 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10516 		    __func__, sc->sc_phytype, reg);
   10517 		reg &= MII_ADDRMASK;
   10518 	}
   10519 
   10520 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10521 	    MDIC_REGADD(reg));
   10522 
   10523 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10524 		delay(50);
   10525 		mdic = CSR_READ(sc, WMREG_MDIC);
   10526 		if (mdic & MDIC_READY)
   10527 			break;
   10528 	}
   10529 
   10530 	if ((mdic & MDIC_READY) == 0) {
   10531 		DPRINTF(WM_DEBUG_GMII,
   10532 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10533 			device_xname(dev), phy, reg));
   10534 		return ETIMEDOUT;
   10535 	} else if (mdic & MDIC_E) {
   10536 		/* This is normal if no PHY is present. */
   10537 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10538 			device_xname(sc->sc_dev), phy, reg));
   10539 		return -1;
   10540 	} else
   10541 		*val = MDIC_DATA(mdic);
   10542 
   10543 	/*
   10544 	 * Allow some time after each MDIC transaction to avoid
   10545 	 * reading duplicate data in the next MDIC transaction.
   10546 	 */
   10547 	if (sc->sc_type == WM_T_PCH2)
   10548 		delay(100);
   10549 
   10550 	return 0;
   10551 }
   10552 
   10553 /*
   10554  * wm_gmii_mdic_writereg:	[mii interface function]
   10555  *
   10556  *	Write a PHY register on the GMII.
   10557  */
   10558 static int
   10559 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10560 {
   10561 	struct wm_softc *sc = device_private(dev);
   10562 	uint32_t mdic = 0;
   10563 	int i;
   10564 
   10565 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10566 	    && (reg > MII_ADDRMASK)) {
   10567 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10568 		    __func__, sc->sc_phytype, reg);
   10569 		reg &= MII_ADDRMASK;
   10570 	}
   10571 
   10572 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10573 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10574 
   10575 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10576 		delay(50);
   10577 		mdic = CSR_READ(sc, WMREG_MDIC);
   10578 		if (mdic & MDIC_READY)
   10579 			break;
   10580 	}
   10581 
   10582 	if ((mdic & MDIC_READY) == 0) {
   10583 		DPRINTF(WM_DEBUG_GMII,
   10584 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10585 			device_xname(dev), phy, reg));
   10586 		return ETIMEDOUT;
   10587 	} else if (mdic & MDIC_E) {
   10588 		DPRINTF(WM_DEBUG_GMII,
   10589 		    ("%s: MDIC write error: phy %d reg %d\n",
   10590 			device_xname(dev), phy, reg));
   10591 		return -1;
   10592 	}
   10593 
   10594 	/*
   10595 	 * Allow some time after each MDIC transaction to avoid
   10596 	 * reading duplicate data in the next MDIC transaction.
   10597 	 */
   10598 	if (sc->sc_type == WM_T_PCH2)
   10599 		delay(100);
   10600 
   10601 	return 0;
   10602 }
   10603 
   10604 /*
   10605  * wm_gmii_i82544_readreg:	[mii interface function]
   10606  *
   10607  *	Read a PHY register on the GMII.
   10608  */
   10609 static int
   10610 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10611 {
   10612 	struct wm_softc *sc = device_private(dev);
   10613 	int rv;
   10614 
   10615 	if (sc->phy.acquire(sc)) {
   10616 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10617 		return -1;
   10618 	}
   10619 
   10620 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10621 
   10622 	sc->phy.release(sc);
   10623 
   10624 	return rv;
   10625 }
   10626 
   10627 static int
   10628 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10629 {
   10630 	struct wm_softc *sc = device_private(dev);
   10631 
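	/*
	 * IGP PHYs need a page-select write before accessing registers
	 * above the multi-page boundary; other PHY types are expected to
	 * stay within the low registers here.
	 */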
   10632 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10633 		switch (sc->sc_phytype) {
   10634 		case WMPHY_IGP:
   10635 		case WMPHY_IGP_2:
   10636 		case WMPHY_IGP_3:
   10637 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10638 			    reg);
   10639 			break;
   10640 		default:
   10641 #ifdef WM_DEBUG
   10642 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10643 			    __func__, sc->sc_phytype, reg);
   10644 #endif
   10645 			break;
   10646 		}
   10647 	}
   10648 
   10649 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10650 
   10651 	return 0;
   10652 }
   10653 
   10654 /*
   10655  * wm_gmii_i82544_writereg:	[mii interface function]
   10656  *
   10657  *	Write a PHY register on the GMII.
   10658  */
   10659 static int
   10660 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10661 {
   10662 	struct wm_softc *sc = device_private(dev);
   10663 	int rv;
   10664 
   10665 	if (sc->phy.acquire(sc)) {
   10666 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10667 		return -1;
   10668 	}
   10669 
   10670 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10671 	sc->phy.release(sc);
   10672 
   10673 	return rv;
   10674 }
   10675 
   10676 static int
   10677 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10678 {
   10679 	struct wm_softc *sc = device_private(dev);
   10680 
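	/* As in the locked read: IGP PHYs need a page-select write first. */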
   10681 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10682 		switch (sc->sc_phytype) {
   10683 		case WMPHY_IGP:
   10684 		case WMPHY_IGP_2:
   10685 		case WMPHY_IGP_3:
   10686 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10687 			    reg);
   10688 			break;
   10689 		default:
   10690 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10692 			    __func__, sc->sc_phytype, reg);
   10693 #endif
   10694 			break;
   10695 		}
   10696 	}
   10697 
   10698 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10699 
   10700 	return 0;
   10701 }
   10702 
   10703 /*
   10704  * wm_gmii_i80003_readreg:	[mii interface function]
   10705  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10709  */
   10710 static int
   10711 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10712 {
   10713 	struct wm_softc *sc = device_private(dev);
   10714 	int page_select;
   10715 	uint16_t temp, temp2;
   10716 	int rv = 0;
   10717 
   10718 	if (phy != 1) /* only one PHY on kumeran bus */
   10719 		return -1;
   10720 
   10721 	if (sc->phy.acquire(sc)) {
   10722 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10723 		return -1;
   10724 	}
   10725 
   10726 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10727 		page_select = GG82563_PHY_PAGE_SELECT;
   10728 	else {
   10729 		/*
   10730 		 * Use Alternative Page Select register to access registers
   10731 		 * 30 and 31.
   10732 		 */
   10733 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10734 	}
   10735 	temp = reg >> GG82563_PAGE_SHIFT;
   10736 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10737 		goto out;
   10738 
   10739 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   10744 		delay(200);
   10745 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10746 		if (temp2 != temp) {
   10747 			device_printf(dev, "%s failed\n", __func__);
   10748 			rv = -1;
   10749 			goto out;
   10750 		}
   10751 		delay(200);
   10752 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10753 		delay(200);
   10754 	} else
   10755 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10756 
   10757 out:
   10758 	sc->phy.release(sc);
   10759 	return rv;
   10760 }
   10761 
   10762 /*
   10763  * wm_gmii_i80003_writereg:	[mii interface function]
   10764  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10768  */
   10769 static int
   10770 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10771 {
   10772 	struct wm_softc *sc = device_private(dev);
   10773 	int page_select, rv;
   10774 	uint16_t temp, temp2;
   10775 
   10776 	if (phy != 1) /* only one PHY on kumeran bus */
   10777 		return -1;
   10778 
   10779 	if (sc->phy.acquire(sc)) {
   10780 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10781 		return -1;
   10782 	}
   10783 
   10784 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10785 		page_select = GG82563_PHY_PAGE_SELECT;
   10786 	else {
   10787 		/*
   10788 		 * Use Alternative Page Select register to access registers
   10789 		 * 30 and 31.
   10790 		 */
   10791 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10792 	}
   10793 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10794 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10795 		goto out;
   10796 
   10797 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   10802 		delay(200);
   10803 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10804 		if (temp2 != temp) {
   10805 			device_printf(dev, "%s failed\n", __func__);
   10806 			rv = -1;
   10807 			goto out;
   10808 		}
   10809 		delay(200);
   10810 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10811 		delay(200);
   10812 	} else
   10813 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10814 
   10815 out:
   10816 	sc->phy.release(sc);
   10817 	return rv;
   10818 }
   10819 
   10820 /*
   10821  * wm_gmii_bm_readreg:	[mii interface function]
   10822  *
 *	Read a PHY register on BM PHYs (e.g. 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10826  */
   10827 static int
   10828 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10829 {
   10830 	struct wm_softc *sc = device_private(dev);
   10831 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10832 	int rv;
   10833 
   10834 	if (sc->phy.acquire(sc)) {
   10835 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10836 		return -1;
   10837 	}
   10838 
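	/*
	 * On BM PHYs (except 82574/82583), registers on pages >= 768, plus
	 * register 25 on page 0 and register 31, are accessed at PHY
	 * address 1.
	 */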
   10839 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10840 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10841 		    || (reg == 31)) ? 1 : phy;
   10842 	/* Page 800 works differently than the rest so it has its own func */
   10843 	if (page == BM_WUC_PAGE) {
   10844 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10845 		goto release;
   10846 	}
   10847 
   10848 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10849 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10850 		    && (sc->sc_type != WM_T_82583))
   10851 			rv = wm_gmii_mdic_writereg(dev, phy,
   10852 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10853 		else
   10854 			rv = wm_gmii_mdic_writereg(dev, phy,
   10855 			    BME1000_PHY_PAGE_SELECT, page);
   10856 		if (rv != 0)
   10857 			goto release;
   10858 	}
   10859 
   10860 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10861 
   10862 release:
   10863 	sc->phy.release(sc);
   10864 	return rv;
   10865 }
   10866 
   10867 /*
   10868  * wm_gmii_bm_writereg:	[mii interface function]
   10869  *
 *	Write a PHY register on BM PHYs (e.g. 82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10873  */
   10874 static int
   10875 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10876 {
   10877 	struct wm_softc *sc = device_private(dev);
   10878 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10879 	int rv;
   10880 
   10881 	if (sc->phy.acquire(sc)) {
   10882 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10883 		return -1;
   10884 	}
   10885 
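	/* Same PHY address remapping as in wm_gmii_bm_readreg(). */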
   10886 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10887 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10888 		    || (reg == 31)) ? 1 : phy;
   10889 	/* Page 800 works differently than the rest so it has its own func */
   10890 	if (page == BM_WUC_PAGE) {
   10891 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10892 		goto release;
   10893 	}
   10894 
   10895 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10896 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10897 		    && (sc->sc_type != WM_T_82583))
   10898 			rv = wm_gmii_mdic_writereg(dev, phy,
   10899 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10900 		else
   10901 			rv = wm_gmii_mdic_writereg(dev, phy,
   10902 			    BME1000_PHY_PAGE_SELECT, page);
   10903 		if (rv != 0)
   10904 			goto release;
   10905 	}
   10906 
   10907 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10908 
   10909 release:
   10910 	sc->phy.release(sc);
   10911 	return rv;
   10912 }
   10913 
   10914 /*
   10915  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10916  *  @dev: pointer to the HW structure
   10917  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10918  *
   10919  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10920  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10921  */
   10922 static int
   10923 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10924 {
   10925 	uint16_t temp;
   10926 	int rv;
   10927 
   10928 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10929 		device_xname(dev), __func__));
   10930 
   10931 	if (!phy_regp)
   10932 		return -1;
   10933 
   10934 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10935 
   10936 	/* Select Port Control Registers page */
   10937 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10938 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10939 	if (rv != 0)
   10940 		return rv;
   10941 
   10942 	/* Read WUCE and save it */
   10943 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10944 	if (rv != 0)
   10945 		return rv;
   10946 
   10947 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10948 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10949 	 */
   10950 	temp = *phy_regp;
   10951 	temp |= BM_WUC_ENABLE_BIT;
   10952 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10953 
   10954 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10955 		return rv;
   10956 
   10957 	/* Select Host Wakeup Registers page - caller now able to write
   10958 	 * registers on the Wakeup registers page
   10959 	 */
   10960 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10961 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10962 }
   10963 
   10964 /*
   10965  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10966  *  @dev: pointer to the HW structure
   10967  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10968  *
   10969  *  Restore BM_WUC_ENABLE_REG to its original value.
   10970  *
   10971  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10972  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10973  *  caller.
   10974  */
   10975 static int
   10976 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10977 {
   10978 
   10979 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10980 		device_xname(dev), __func__));
   10981 
   10982 	if (!phy_regp)
   10983 		return -1;
   10984 
   10985 	/* Select Port Control Registers page */
   10986 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10987 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10988 
   10989 	/* Restore 769.17 to its original value */
   10990 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10991 
   10992 	return 0;
   10993 }
   10994 
   10995 /*
   10996  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the HW structure
   10998  *  @offset: register offset to be read or written
   10999  *  @val: pointer to the data to read or write
   11000  *  @rd: determines if operation is read or write
   11001  *  @page_set: BM_WUC_PAGE already set and access enabled
   11002  *
   11003  *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to the PHY register at offset.  Note the procedure
 *  to access the PHY wakeup registers differs from that for the other PHY
 *  registers. It works as follows:
   11007  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for the host (801 for manageability)
   11009  *  3) Write the address using the address opcode (0x11)
   11010  *  4) Read or write the data using the data opcode (0x12)
   11011  *  5) Restore 769.17.2 to its original value
   11012  *
   11013  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11014  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11015  *
   11016  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calling wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11019  */
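/*
 * A minimal usage sketch (illustrative only; assumes the PHY semaphore is
 * already held and that "offset" encodes page BM_WUC_PAGE, matching the
 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() accessors used below):
 *
 *	uint16_t val;
 *	int rv = wm_access_phy_wakeup_reg_bm(dev, offset, &val, true, false);
 */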
   11020 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11022 	bool page_set)
   11023 {
   11024 	struct wm_softc *sc = device_private(dev);
   11025 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11026 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11027 	uint16_t wuce;
   11028 	int rv = 0;
   11029 
   11030 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11031 		device_xname(dev), __func__));
   11032 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11033 	if ((sc->sc_type == WM_T_PCH)
   11034 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11035 		device_printf(dev,
   11036 		    "Attempting to access page %d while gig enabled.\n", page);
   11037 	}
   11038 
   11039 	if (!page_set) {
   11040 		/* Enable access to PHY wakeup registers */
   11041 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11042 		if (rv != 0) {
   11043 			device_printf(dev,
   11044 			    "%s: Could not enable PHY wakeup reg access\n",
   11045 			    __func__);
   11046 			return rv;
   11047 		}
   11048 	}
   11049 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11050 		device_xname(sc->sc_dev), __func__, page, regnum));
   11051 
	/*
	 * Steps 3 and 4: access the PHY wakeup register, following the
	 * procedure described in the comment above.
	 */
   11056 
   11057 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11058 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11059 	if (rv != 0)
   11060 		return rv;
   11061 
   11062 	if (rd) {
   11063 		/* Read the Wakeup register page value using opcode 0x12 */
   11064 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11065 	} else {
   11066 		/* Write the Wakeup register page value using opcode 0x12 */
   11067 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11068 	}
   11069 	if (rv != 0)
   11070 		return rv;
   11071 
   11072 	if (!page_set)
   11073 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11074 
   11075 	return rv;
   11076 }
   11077 
   11078 /*
   11079  * wm_gmii_hv_readreg:	[mii interface function]
   11080  *
 *	Read a PHY register on HV PHYs (PCH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11084  */
   11085 static int
   11086 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11087 {
   11088 	struct wm_softc *sc = device_private(dev);
   11089 	int rv;
   11090 
   11091 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11092 		device_xname(dev), __func__));
   11093 	if (sc->phy.acquire(sc)) {
   11094 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11095 		return -1;
   11096 	}
   11097 
   11098 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11099 	sc->phy.release(sc);
   11100 	return rv;
   11101 }
   11102 
   11103 static int
   11104 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11105 {
   11106 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11107 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11108 	int rv;
   11109 
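	/* Pages at or above HV_INTC_FC_PAGE_START live at PHY address 1. */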
   11110 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11111 
   11112 	/* Page 800 works differently than the rest so it has its own func */
   11113 	if (page == BM_WUC_PAGE)
   11114 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11115 
	/*
	 * Pages lower than 768 work differently than the rest and are not
	 * handled here.
	 */
   11120 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11121 		printf("gmii_hv_readreg!!!\n");
   11122 		return -1;
   11123 	}
   11124 
   11125 	/*
   11126 	 * XXX I21[789] documents say that the SMBus Address register is at
   11127 	 * PHY address 01, Page 0 (not 768), Register 26.
   11128 	 */
   11129 	if (page == HV_INTC_FC_PAGE_START)
   11130 		page = 0;
   11131 
   11132 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11133 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11134 		    page << BME1000_PAGE_SHIFT);
   11135 		if (rv != 0)
   11136 			return rv;
   11137 	}
   11138 
   11139 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11140 }
   11141 
   11142 /*
   11143  * wm_gmii_hv_writereg:	[mii interface function]
   11144  *
 *	Write a PHY register on HV PHYs (PCH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11148  */
   11149 static int
   11150 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11151 {
   11152 	struct wm_softc *sc = device_private(dev);
   11153 	int rv;
   11154 
   11155 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11156 		device_xname(dev), __func__));
   11157 
   11158 	if (sc->phy.acquire(sc)) {
   11159 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11160 		return -1;
   11161 	}
   11162 
   11163 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11164 	sc->phy.release(sc);
   11165 
   11166 	return rv;
   11167 }
   11168 
   11169 static int
   11170 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11171 {
   11172 	struct wm_softc *sc = device_private(dev);
   11173 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11174 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11175 	int rv;
   11176 
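	/* Same PHY address remapping as in wm_gmii_hv_readreg_locked(). */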
   11177 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11178 
   11179 	/* Page 800 works differently than the rest so it has its own func */
   11180 	if (page == BM_WUC_PAGE)
   11181 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11182 		    false);
   11183 
	/*
	 * Pages lower than 768 (other than page 0) work differently than
	 * the rest and would need their own function; not implemented here.
	 */
   11188 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: page %d not supported\n",
		    __func__, page);
   11190 		return -1;
   11191 	}
   11192 
   11193 	{
   11194 		/*
   11195 		 * XXX I21[789] documents say that the SMBus Address register
   11196 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11197 		 */
   11198 		if (page == HV_INTC_FC_PAGE_START)
   11199 			page = 0;
   11200 
   11201 		/*
   11202 		 * XXX Workaround MDIO accesses being disabled after entering
   11203 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11204 		 * register is set)
   11205 		 */
   11206 		if (sc->sc_phytype == WMPHY_82578) {
   11207 			struct mii_softc *child;
   11208 
   11209 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11210 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11211 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11212 			    && ((val & (1 << 11)) != 0)) {
				device_printf(dev,
				    "XXX power-down workaround needed\n");
   11214 			}
   11215 		}
   11216 
   11217 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11218 			rv = wm_gmii_mdic_writereg(dev, 1,
   11219 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11220 			if (rv != 0)
   11221 				return rv;
   11222 		}
   11223 	}
   11224 
   11225 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11226 }
   11227 
   11228 /*
   11229  * wm_gmii_82580_readreg:	[mii interface function]
   11230  *
   11231  *	Read a PHY register on the 82580 and I350.
   11232  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11234  */
   11235 static int
   11236 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11237 {
   11238 	struct wm_softc *sc = device_private(dev);
   11239 	int rv;
   11240 
   11241 	if (sc->phy.acquire(sc) != 0) {
   11242 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11243 		return -1;
   11244 	}
   11245 
   11246 #ifdef DIAGNOSTIC
   11247 	if (reg > MII_ADDRMASK) {
   11248 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11249 		    __func__, sc->sc_phytype, reg);
   11250 		reg &= MII_ADDRMASK;
   11251 	}
   11252 #endif
   11253 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11254 
   11255 	sc->phy.release(sc);
   11256 	return rv;
   11257 }
   11258 
   11259 /*
   11260  * wm_gmii_82580_writereg:	[mii interface function]
   11261  *
   11262  *	Write a PHY register on the 82580 and I350.
   11263  * This could be handled by the PHY layer if we didn't have to lock the
   11264  * ressource ...
   11265  */
   11266 static int
   11267 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11268 {
   11269 	struct wm_softc *sc = device_private(dev);
   11270 	int rv;
   11271 
   11272 	if (sc->phy.acquire(sc) != 0) {
   11273 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11274 		return -1;
   11275 	}
   11276 
   11277 #ifdef DIAGNOSTIC
   11278 	if (reg > MII_ADDRMASK) {
   11279 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11280 		    __func__, sc->sc_phytype, reg);
   11281 		reg &= MII_ADDRMASK;
   11282 	}
   11283 #endif
   11284 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11285 
   11286 	sc->phy.release(sc);
   11287 	return rv;
   11288 }
   11289 
   11290 /*
   11291  * wm_gmii_gs40g_readreg:	[mii interface function]
   11292  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11296  */
   11297 static int
   11298 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11299 {
   11300 	struct wm_softc *sc = device_private(dev);
   11301 	int page, offset;
   11302 	int rv;
   11303 
   11304 	/* Acquire semaphore */
   11305 	if (sc->phy.acquire(sc)) {
   11306 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11307 		return -1;
   11308 	}
   11309 
   11310 	/* Page select */
   11311 	page = reg >> GS40G_PAGE_SHIFT;
   11312 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11313 	if (rv != 0)
   11314 		goto release;
   11315 
   11316 	/* Read reg */
   11317 	offset = reg & GS40G_OFFSET_MASK;
   11318 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11319 
   11320 release:
   11321 	sc->phy.release(sc);
   11322 	return rv;
   11323 }
   11324 
   11325 /*
   11326  * wm_gmii_gs40g_writereg:	[mii interface function]
   11327  *
   11328  *	Write a PHY register on the I210 and I211.
   11329  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11331  */
   11332 static int
   11333 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11334 {
   11335 	struct wm_softc *sc = device_private(dev);
   11336 	uint16_t page;
   11337 	int offset, rv;
   11338 
   11339 	/* Acquire semaphore */
   11340 	if (sc->phy.acquire(sc)) {
   11341 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11342 		return -1;
   11343 	}
   11344 
   11345 	/* Page select */
   11346 	page = reg >> GS40G_PAGE_SHIFT;
   11347 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11348 	if (rv != 0)
   11349 		goto release;
   11350 
   11351 	/* Write reg */
   11352 	offset = reg & GS40G_OFFSET_MASK;
   11353 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11354 
   11355 release:
   11356 	/* Release semaphore */
   11357 	sc->phy.release(sc);
   11358 	return rv;
   11359 }
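
/*
 * Worked example for the GS40G paged access above, assuming the
 * conventional GS40G_PAGE_SHIFT of 16 and GS40G_OFFSET_MASK of 0xffff:
 * a 'reg' value of 0x00020019 writes 2 to GS40G_PAGE_SELECT to pick
 * PHY page 2 and then accesses register 0x19 on that page.
 */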
   11360 
   11361 /*
   11362  * wm_gmii_statchg:	[mii interface function]
   11363  *
   11364  *	Callback from MII layer when media changes.
   11365  */
   11366 static void
   11367 wm_gmii_statchg(struct ifnet *ifp)
   11368 {
   11369 	struct wm_softc *sc = ifp->if_softc;
   11370 	struct mii_data *mii = &sc->sc_mii;
   11371 
   11372 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11373 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11374 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11375 
   11376 	/*
   11377 	 * Get flow control negotiation result.
   11378 	 */
   11379 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11380 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11381 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11382 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11383 	}
   11384 
   11385 	if (sc->sc_flowflags & IFM_FLOW) {
   11386 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11387 			sc->sc_ctrl |= CTRL_TFCE;
   11388 			sc->sc_fcrtl |= FCRTL_XONE;
   11389 		}
   11390 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11391 			sc->sc_ctrl |= CTRL_RFCE;
   11392 	}
   11393 
   11394 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11395 		DPRINTF(WM_DEBUG_LINK,
   11396 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11397 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11398 	} else {
   11399 		DPRINTF(WM_DEBUG_LINK,
   11400 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11401 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11402 	}
   11403 
   11404 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11405 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11406 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11407 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11408 	if (sc->sc_type == WM_T_80003) {
   11409 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11410 		case IFM_1000_T:
   11411 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11412 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11414 			break;
   11415 		default:
   11416 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11417 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11419 			break;
   11420 		}
   11421 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11422 	}
   11423 }
   11424 
/* Kumeran related (80003, ICH* and PCH*) */
   11426 
   11427 /*
   11428  * wm_kmrn_readreg:
   11429  *
   11430  *	Read a kumeran register
   11431  */
   11432 static int
   11433 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11434 {
   11435 	int rv;
   11436 
   11437 	if (sc->sc_type == WM_T_80003)
   11438 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11439 	else
   11440 		rv = sc->phy.acquire(sc);
   11441 	if (rv != 0) {
   11442 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11443 		    __func__);
   11444 		return rv;
   11445 	}
   11446 
   11447 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11448 
   11449 	if (sc->sc_type == WM_T_80003)
   11450 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11451 	else
   11452 		sc->phy.release(sc);
   11453 
   11454 	return rv;
   11455 }
   11456 
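/*
 * The locked accessors below implement the Kumeran handshake directly:
 * the register offset is placed in the OFFSET field of KUMCTRLSTA, REN
 * requests a read, and after a flush and a short delay the data can be
 * taken from the low 16 bits of KUMCTRLSTA.  A write simply carries the
 * data in the low 16 bits of the same register write.
 */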
   11457 static int
   11458 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11459 {
   11460 
   11461 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11462 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11463 	    KUMCTRLSTA_REN);
   11464 	CSR_WRITE_FLUSH(sc);
   11465 	delay(2);
   11466 
   11467 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11468 
   11469 	return 0;
   11470 }
   11471 
   11472 /*
   11473  * wm_kmrn_writereg:
   11474  *
   11475  *	Write a kumeran register
   11476  */
   11477 static int
   11478 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11479 {
   11480 	int rv;
   11481 
   11482 	if (sc->sc_type == WM_T_80003)
   11483 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11484 	else
   11485 		rv = sc->phy.acquire(sc);
   11486 	if (rv != 0) {
   11487 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11488 		    __func__);
   11489 		return rv;
   11490 	}
   11491 
   11492 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11493 
   11494 	if (sc->sc_type == WM_T_80003)
   11495 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11496 	else
   11497 		sc->phy.release(sc);
   11498 
   11499 	return rv;
   11500 }
   11501 
   11502 static int
   11503 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11504 {
   11505 
   11506 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11507 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11508 
   11509 	return 0;
   11510 }
   11511 
   11512 /*
   11513  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11514  * This access method is different from IEEE MMD.
   11515  */
   11516 static int
   11517 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11518 {
   11519 	struct wm_softc *sc = device_private(dev);
   11520 	int rv;
   11521 
   11522 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11523 	if (rv != 0)
   11524 		return rv;
   11525 
   11526 	if (rd)
   11527 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11528 	else
   11529 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11530 	return rv;
   11531 }
   11532 
   11533 static int
   11534 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11535 {
   11536 
   11537 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11538 }
   11539 
   11540 static int
   11541 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11542 {
   11543 
   11544 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11545 }
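
#if 0
/*
 * Usage sketch (not compiled): the EMI accessors above must be called
 * with the PHY lock held, because the I82579_EMI_ADDR/I82579_EMI_DATA
 * pair is a shared indirection window.  The register name used here,
 * I82579_LPI_UPDATE_TIMER, is only an assumed example.
 */
static int
wm_emi_usage_example(device_t dev)
{
	struct wm_softc *sc = device_private(dev);
	uint16_t timer;
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return -1;
	rv = wm_read_emi_reg_locked(dev, I82579_LPI_UPDATE_TIMER, &timer);
	sc->phy.release(sc);
	return rv;
}
#endif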
   11546 
   11547 /* SGMII related */
   11548 
   11549 /*
   11550  * wm_sgmii_uses_mdio
   11551  *
   11552  * Check whether the transaction is to the internal PHY or the external
   11553  * MDIO interface. Return true if it's MDIO.
   11554  */
   11555 static bool
   11556 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11557 {
   11558 	uint32_t reg;
   11559 	bool ismdio = false;
   11560 
   11561 	switch (sc->sc_type) {
   11562 	case WM_T_82575:
   11563 	case WM_T_82576:
   11564 		reg = CSR_READ(sc, WMREG_MDIC);
   11565 		ismdio = ((reg & MDIC_DEST) != 0);
   11566 		break;
   11567 	case WM_T_82580:
   11568 	case WM_T_I350:
   11569 	case WM_T_I354:
   11570 	case WM_T_I210:
   11571 	case WM_T_I211:
   11572 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11573 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11574 		break;
   11575 	default:
   11576 		break;
   11577 	}
   11578 
   11579 	return ismdio;
   11580 }
   11581 
   11582 /*
   11583  * wm_sgmii_readreg:	[mii interface function]
   11584  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11588  */
   11589 static int
   11590 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11591 {
   11592 	struct wm_softc *sc = device_private(dev);
   11593 	int rv;
   11594 
   11595 	if (sc->phy.acquire(sc)) {
   11596 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11597 		return -1;
   11598 	}
   11599 
   11600 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11601 
   11602 	sc->phy.release(sc);
   11603 	return rv;
   11604 }
   11605 
   11606 static int
   11607 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11608 {
   11609 	struct wm_softc *sc = device_private(dev);
   11610 	uint32_t i2ccmd;
	int i, rv = 0;
   11612 
   11613 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11614 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11615 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11616 
   11617 	/* Poll the ready bit */
   11618 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11619 		delay(50);
   11620 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11621 		if (i2ccmd & I2CCMD_READY)
   11622 			break;
   11623 	}
	if ((i2ccmd & I2CCMD_READY) == 0) {
		device_printf(dev, "I2CCMD Read did not complete\n");
		return ETIMEDOUT;
	}
	if ((i2ccmd & I2CCMD_ERROR) != 0) {
		device_printf(dev, "I2CCMD Error bit set\n");
		return EIO;
	}
   11632 
   11633 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11634 
   11635 	return rv;
   11636 }
   11637 
   11638 /*
   11639  * wm_sgmii_writereg:	[mii interface function]
   11640  *
   11641  *	Write a PHY register on the SGMII.
   11642  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11644  */
   11645 static int
   11646 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11647 {
   11648 	struct wm_softc *sc = device_private(dev);
   11649 	int rv;
   11650 
   11651 	if (sc->phy.acquire(sc) != 0) {
   11652 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11653 		return -1;
   11654 	}
   11655 
   11656 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11657 
   11658 	sc->phy.release(sc);
   11659 
   11660 	return rv;
   11661 }
   11662 
   11663 static int
   11664 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11665 {
   11666 	struct wm_softc *sc = device_private(dev);
   11667 	uint32_t i2ccmd;
   11668 	uint16_t swapdata;
   11669 	int rv = 0;
   11670 	int i;
   11671 
   11672 	/* Swap the data bytes for the I2C interface */
   11673 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11674 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11675 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11676 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11677 
   11678 	/* Poll the ready bit */
   11679 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11680 		delay(50);
   11681 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11682 		if (i2ccmd & I2CCMD_READY)
   11683 			break;
   11684 	}
   11685 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11686 		device_printf(dev, "I2CCMD Write did not complete\n");
   11687 		rv = ETIMEDOUT;
   11688 	}
   11689 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11690 		device_printf(dev, "I2CCMD Error bit set\n");
   11691 		rv = EIO;
   11692 	}
   11693 
   11694 	return rv;
   11695 }
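
/*
 * Note on the byte swaps above: the I2CCMD data field apparently holds
 * the 16-bit PHY register value in I2C wire order (most significant
 * byte transferred first), so both the read path and the write path
 * swap the two data bytes when moving values between 'val' and the
 * I2CCMD register.
 */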
   11696 
   11697 /* TBI related */
   11698 
   11699 static bool
   11700 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11701 {
   11702 	bool sig;
   11703 
   11704 	sig = ctrl & CTRL_SWDPIN(1);
   11705 
   11706 	/*
   11707 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11708 	 * detect a signal, 1 if they don't.
   11709 	 */
   11710 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11711 		sig = !sig;
   11712 
   11713 	return sig;
   11714 }
   11715 
   11716 /*
   11717  * wm_tbi_mediainit:
   11718  *
   11719  *	Initialize media for use on 1000BASE-X devices.
   11720  */
   11721 static void
   11722 wm_tbi_mediainit(struct wm_softc *sc)
   11723 {
   11724 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11725 	const char *sep = "";
   11726 
   11727 	if (sc->sc_type < WM_T_82543)
   11728 		sc->sc_tipg = TIPG_WM_DFLT;
   11729 	else
   11730 		sc->sc_tipg = TIPG_LG_DFLT;
   11731 
   11732 	sc->sc_tbi_serdes_anegticks = 5;
   11733 
   11734 	/* Initialize our media structures */
   11735 	sc->sc_mii.mii_ifp = ifp;
   11736 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11737 
   11738 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11739 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11740 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11741 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11742 	else
   11743 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11744 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11745 
   11746 	/*
   11747 	 * SWD Pins:
   11748 	 *
   11749 	 *	0 = Link LED (output)
   11750 	 *	1 = Loss Of Signal (input)
   11751 	 */
   11752 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11753 
   11754 	/* XXX Perhaps this is only for TBI */
   11755 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11756 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11757 
   11758 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11759 		sc->sc_ctrl &= ~CTRL_LRST;
   11760 
   11761 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11762 
   11763 #define	ADD(ss, mm, dd)							\
   11764 do {									\
   11765 	aprint_normal("%s%s", sep, ss);					\
   11766 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11767 	sep = ", ";							\
   11768 } while (/*CONSTCOND*/0)
   11769 
   11770 	aprint_normal_dev(sc->sc_dev, "");
   11771 
   11772 	if (sc->sc_type == WM_T_I354) {
   11773 		uint32_t status;
   11774 
   11775 		status = CSR_READ(sc, WMREG_STATUS);
   11776 		if (((status & STATUS_2P5_SKU) != 0)
   11777 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   11781 	} else if (sc->sc_type == WM_T_82545) {
   11782 		/* Only 82545 is LX (XXX except SFP) */
   11783 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11784 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11785 	} else {
   11786 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11787 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11788 	}
   11789 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11790 	aprint_normal("\n");
   11791 
   11792 #undef ADD
   11793 
   11794 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11795 }
   11796 
   11797 /*
   11798  * wm_tbi_mediachange:	[ifmedia interface function]
   11799  *
   11800  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11801  */
   11802 static int
   11803 wm_tbi_mediachange(struct ifnet *ifp)
   11804 {
   11805 	struct wm_softc *sc = ifp->if_softc;
   11806 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11807 	uint32_t status, ctrl;
   11808 	bool signal;
   11809 	int i;
   11810 
   11811 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11812 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11813 		/* XXX need some work for >= 82571 and < 82575 */
   11814 		if (sc->sc_type < WM_T_82575)
   11815 			return 0;
   11816 	}
   11817 
   11818 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11819 	    || (sc->sc_type >= WM_T_82575))
   11820 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11821 
   11822 	sc->sc_ctrl &= ~CTRL_LRST;
   11823 	sc->sc_txcw = TXCW_ANE;
   11824 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11825 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11826 	else if (ife->ifm_media & IFM_FDX)
   11827 		sc->sc_txcw |= TXCW_FD;
   11828 	else
   11829 		sc->sc_txcw |= TXCW_HD;
   11830 
   11831 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11832 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11833 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11835 		device_xname(sc->sc_dev), sc->sc_txcw));
   11836 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11837 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11838 	CSR_WRITE_FLUSH(sc);
   11839 	delay(1000);
   11840 
	ctrl = CSR_READ(sc, WMREG_CTRL);
   11842 	signal = wm_tbi_havesignal(sc, ctrl);
   11843 
   11844 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11845 		signal));
   11846 
   11847 	if (signal) {
   11848 		/* Have signal; wait for the link to come up. */
   11849 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11850 			delay(10000);
   11851 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11852 				break;
   11853 		}
   11854 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   11857 
   11858 		status = CSR_READ(sc, WMREG_STATUS);
   11859 		DPRINTF(WM_DEBUG_LINK,
   11860 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
   11862 		if (status & STATUS_LU) {
   11863 			/* Link is up. */
   11864 			DPRINTF(WM_DEBUG_LINK,
   11865 			    ("%s: LINK: set media -> link up %s\n",
   11866 				device_xname(sc->sc_dev),
   11867 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11868 
   11869 			/*
   11870 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11871 			 * so we should update sc->sc_ctrl
   11872 			 */
   11873 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11874 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11875 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11876 			if (status & STATUS_FD)
   11877 				sc->sc_tctl |=
   11878 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11879 			else
   11880 				sc->sc_tctl |=
   11881 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11882 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11883 				sc->sc_fcrtl |= FCRTL_XONE;
   11884 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11885 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11886 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11887 			sc->sc_tbi_linkup = 1;
   11888 		} else {
   11889 			if (i == WM_LINKUP_TIMEOUT)
   11890 				wm_check_for_link(sc);
   11891 			/* Link is down. */
   11892 			DPRINTF(WM_DEBUG_LINK,
   11893 			    ("%s: LINK: set media -> link down\n",
   11894 				device_xname(sc->sc_dev)));
   11895 			sc->sc_tbi_linkup = 0;
   11896 		}
   11897 	} else {
   11898 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11899 			device_xname(sc->sc_dev)));
   11900 		sc->sc_tbi_linkup = 0;
   11901 	}
   11902 
   11903 	wm_tbi_serdes_set_linkled(sc);
   11904 
   11905 	return 0;
   11906 }
   11907 
   11908 /*
   11909  * wm_tbi_mediastatus:	[ifmedia interface function]
   11910  *
   11911  *	Get the current interface media status on a 1000BASE-X device.
   11912  */
   11913 static void
   11914 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11915 {
   11916 	struct wm_softc *sc = ifp->if_softc;
   11917 	uint32_t ctrl, status;
   11918 
   11919 	ifmr->ifm_status = IFM_AVALID;
   11920 	ifmr->ifm_active = IFM_ETHER;
   11921 
   11922 	status = CSR_READ(sc, WMREG_STATUS);
   11923 	if ((status & STATUS_LU) == 0) {
   11924 		ifmr->ifm_active |= IFM_NONE;
   11925 		return;
   11926 	}
   11927 
   11928 	ifmr->ifm_status |= IFM_ACTIVE;
   11929 	/* Only 82545 is LX */
   11930 	if (sc->sc_type == WM_T_82545)
   11931 		ifmr->ifm_active |= IFM_1000_LX;
   11932 	else
   11933 		ifmr->ifm_active |= IFM_1000_SX;
   11934 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11935 		ifmr->ifm_active |= IFM_FDX;
   11936 	else
   11937 		ifmr->ifm_active |= IFM_HDX;
   11938 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11939 	if (ctrl & CTRL_RFCE)
   11940 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11941 	if (ctrl & CTRL_TFCE)
   11942 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11943 }
   11944 
   11945 /* XXX TBI only */
   11946 static int
   11947 wm_check_for_link(struct wm_softc *sc)
   11948 {
   11949 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11950 	uint32_t rxcw;
   11951 	uint32_t ctrl;
   11952 	uint32_t status;
   11953 	bool signal;
   11954 
   11955 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11956 		device_xname(sc->sc_dev), __func__));
   11957 
   11958 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11959 		/* XXX need some work for >= 82571 */
   11960 		if (sc->sc_type >= WM_T_82571) {
   11961 			sc->sc_tbi_linkup = 1;
   11962 			return 0;
   11963 		}
   11964 	}
   11965 
   11966 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11967 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11968 	status = CSR_READ(sc, WMREG_STATUS);
   11969 	signal = wm_tbi_havesignal(sc, ctrl);
   11970 
   11971 	DPRINTF(WM_DEBUG_LINK,
   11972 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11973 		device_xname(sc->sc_dev), __func__, signal,
   11974 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11975 
   11976 	/*
   11977 	 * SWDPIN   LU RXCW
   11978 	 *	0    0	  0
   11979 	 *	0    0	  1	(should not happen)
   11980 	 *	0    1	  0	(should not happen)
   11981 	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonegotiation and force link up
	 *	1    0	  1	Got /C/ but not linked up yet
	 *	1    1	  0	(linkup)
	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   11986 	 *
   11987 	 */
   11988 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11989 		DPRINTF(WM_DEBUG_LINK,
   11990 		    ("%s: %s: force linkup and fullduplex\n",
   11991 			device_xname(sc->sc_dev), __func__));
   11992 		sc->sc_tbi_linkup = 0;
   11993 		/* Disable auto-negotiation in the TXCW register */
   11994 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11995 
   11996 		/*
   11997 		 * Force link-up and also force full-duplex.
   11998 		 *
		 * NOTE: CTRL will have updated TFCE and RFCE automatically,
   12000 		 * so we should update sc->sc_ctrl
   12001 		 */
   12002 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12003 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12004 	} else if (((status & STATUS_LU) != 0)
   12005 	    && ((rxcw & RXCW_C) != 0)
   12006 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12007 		sc->sc_tbi_linkup = 1;
   12008 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12009 			device_xname(sc->sc_dev),
   12010 			__func__));
   12011 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12012 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12013 	} else if (signal && ((rxcw & RXCW_C) != 0))
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12015 			device_xname(sc->sc_dev), __func__));
   12016 	else
   12017 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12018 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12019 			status));
   12020 
   12021 	return 0;
   12022 }
   12023 
   12024 /*
   12025  * wm_tbi_tick:
   12026  *
   12027  *	Check the link on TBI devices.
   12028  *	This function acts as mii_tick().
   12029  */
   12030 static void
   12031 wm_tbi_tick(struct wm_softc *sc)
   12032 {
   12033 	struct mii_data *mii = &sc->sc_mii;
   12034 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12035 	uint32_t status;
   12036 
   12037 	KASSERT(WM_CORE_LOCKED(sc));
   12038 
   12039 	status = CSR_READ(sc, WMREG_STATUS);
   12040 
   12041 	/* XXX is this needed? */
   12042 	(void)CSR_READ(sc, WMREG_RXCW);
   12043 	(void)CSR_READ(sc, WMREG_CTRL);
   12044 
   12045 	/* set link status */
   12046 	if ((status & STATUS_LU) == 0) {
   12047 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12048 			device_xname(sc->sc_dev)));
   12049 		sc->sc_tbi_linkup = 0;
   12050 	} else if (sc->sc_tbi_linkup == 0) {
   12051 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12052 			device_xname(sc->sc_dev),
   12053 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12054 		sc->sc_tbi_linkup = 1;
   12055 		sc->sc_tbi_serdes_ticks = 0;
   12056 	}
   12057 
   12058 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12059 		goto setled;
   12060 
   12061 	if ((status & STATUS_LU) == 0) {
   12062 		sc->sc_tbi_linkup = 0;
   12063 		/* If the timer expired, retry autonegotiation */
   12064 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12065 		    && (++sc->sc_tbi_serdes_ticks
   12066 			>= sc->sc_tbi_serdes_anegticks)) {
   12067 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12068 			sc->sc_tbi_serdes_ticks = 0;
   12069 			/*
   12070 			 * Reset the link, and let autonegotiation do
   12071 			 * its thing
   12072 			 */
   12073 			sc->sc_ctrl |= CTRL_LRST;
   12074 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12075 			CSR_WRITE_FLUSH(sc);
   12076 			delay(1000);
   12077 			sc->sc_ctrl &= ~CTRL_LRST;
   12078 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12079 			CSR_WRITE_FLUSH(sc);
   12080 			delay(1000);
   12081 			CSR_WRITE(sc, WMREG_TXCW,
   12082 			    sc->sc_txcw & ~TXCW_ANE);
   12083 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12084 		}
   12085 	}
   12086 
   12087 setled:
   12088 	wm_tbi_serdes_set_linkled(sc);
   12089 }
   12090 
   12091 /* SERDES related */
   12092 static void
   12093 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12094 {
   12095 	uint32_t reg;
   12096 
   12097 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12098 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12099 		return;
   12100 
   12101 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12102 	reg |= PCS_CFG_PCS_EN;
   12103 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12104 
   12105 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12106 	reg &= ~CTRL_EXT_SWDPIN(3);
   12107 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12108 	CSR_WRITE_FLUSH(sc);
   12109 }
   12110 
   12111 static int
   12112 wm_serdes_mediachange(struct ifnet *ifp)
   12113 {
   12114 	struct wm_softc *sc = ifp->if_softc;
   12115 	bool pcs_autoneg = true; /* XXX */
   12116 	uint32_t ctrl_ext, pcs_lctl, reg;
   12117 
   12118 	/* XXX Currently, this function is not called on 8257[12] */
   12119 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12120 	    || (sc->sc_type >= WM_T_82575))
   12121 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12122 
   12123 	wm_serdes_power_up_link_82575(sc);
   12124 
   12125 	sc->sc_ctrl |= CTRL_SLU;
   12126 
   12127 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12128 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12129 
   12130 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12131 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12132 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12133 	case CTRL_EXT_LINK_MODE_SGMII:
   12134 		pcs_autoneg = true;
   12135 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12136 		break;
   12137 	case CTRL_EXT_LINK_MODE_1000KX:
   12138 		pcs_autoneg = false;
   12139 		/* FALLTHROUGH */
   12140 	default:
   12141 		if ((sc->sc_type == WM_T_82575)
   12142 		    || (sc->sc_type == WM_T_82576)) {
   12143 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12144 				pcs_autoneg = false;
   12145 		}
   12146 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12147 		    | CTRL_FRCFDX;
   12148 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12149 	}
   12150 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12151 
   12152 	if (pcs_autoneg) {
   12153 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12154 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12155 
   12156 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12157 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12158 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12159 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12160 	} else
   12161 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12162 
   12163 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12164 
   12165 
   12166 	return 0;
   12167 }
   12168 
   12169 static void
   12170 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12171 {
   12172 	struct wm_softc *sc = ifp->if_softc;
   12173 	struct mii_data *mii = &sc->sc_mii;
   12174 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12175 	uint32_t pcs_adv, pcs_lpab, reg;
   12176 
   12177 	ifmr->ifm_status = IFM_AVALID;
   12178 	ifmr->ifm_active = IFM_ETHER;
   12179 
   12180 	/* Check PCS */
   12181 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12182 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12183 		ifmr->ifm_active |= IFM_NONE;
   12184 		sc->sc_tbi_linkup = 0;
   12185 		goto setled;
   12186 	}
   12187 
   12188 	sc->sc_tbi_linkup = 1;
   12189 	ifmr->ifm_status |= IFM_ACTIVE;
   12190 	if (sc->sc_type == WM_T_I354) {
   12191 		uint32_t status;
   12192 
   12193 		status = CSR_READ(sc, WMREG_STATUS);
   12194 		if (((status & STATUS_2P5_SKU) != 0)
   12195 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12196 			ifmr->ifm_active |= IFM_2500_KX;
   12197 		} else
   12198 			ifmr->ifm_active |= IFM_1000_KX;
   12199 	} else {
   12200 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12201 		case PCS_LSTS_SPEED_10:
   12202 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12203 			break;
   12204 		case PCS_LSTS_SPEED_100:
   12205 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12206 			break;
   12207 		case PCS_LSTS_SPEED_1000:
   12208 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12209 			break;
   12210 		default:
   12211 			device_printf(sc->sc_dev, "Unknown speed\n");
   12212 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12213 			break;
   12214 		}
   12215 	}
   12216 	if ((reg & PCS_LSTS_FDX) != 0)
   12217 		ifmr->ifm_active |= IFM_FDX;
   12218 	else
   12219 		ifmr->ifm_active |= IFM_HDX;
   12220 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12221 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12222 		/* Check flow */
   12223 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12224 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12225 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12226 			goto setled;
   12227 		}
   12228 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12229 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12230 		DPRINTF(WM_DEBUG_LINK,
   12231 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12232 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12233 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12234 			mii->mii_media_active |= IFM_FLOW
   12235 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12236 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12237 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12238 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12239 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12240 			mii->mii_media_active |= IFM_FLOW
   12241 			    | IFM_ETH_TXPAUSE;
   12242 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12243 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12244 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12245 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12246 			mii->mii_media_active |= IFM_FLOW
   12247 			    | IFM_ETH_RXPAUSE;
   12248 		}
   12249 	}
   12250 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12251 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12252 setled:
   12253 	wm_tbi_serdes_set_linkled(sc);
   12254 }
   12255 
   12256 /*
   12257  * wm_serdes_tick:
   12258  *
   12259  *	Check the link on serdes devices.
   12260  */
   12261 static void
   12262 wm_serdes_tick(struct wm_softc *sc)
   12263 {
   12264 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12265 	struct mii_data *mii = &sc->sc_mii;
   12266 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12267 	uint32_t reg;
   12268 
   12269 	KASSERT(WM_CORE_LOCKED(sc));
   12270 
   12271 	mii->mii_media_status = IFM_AVALID;
   12272 	mii->mii_media_active = IFM_ETHER;
   12273 
   12274 	/* Check PCS */
   12275 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12276 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12277 		mii->mii_media_status |= IFM_ACTIVE;
   12278 		sc->sc_tbi_linkup = 1;
   12279 		sc->sc_tbi_serdes_ticks = 0;
   12280 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12281 		if ((reg & PCS_LSTS_FDX) != 0)
   12282 			mii->mii_media_active |= IFM_FDX;
   12283 		else
   12284 			mii->mii_media_active |= IFM_HDX;
   12285 	} else {
		mii->mii_media_active |= IFM_NONE;
   12287 		sc->sc_tbi_linkup = 0;
   12288 		/* If the timer expired, retry autonegotiation */
   12289 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12290 		    && (++sc->sc_tbi_serdes_ticks
   12291 			>= sc->sc_tbi_serdes_anegticks)) {
   12292 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12293 			sc->sc_tbi_serdes_ticks = 0;
   12294 			/* XXX */
   12295 			wm_serdes_mediachange(ifp);
   12296 		}
   12297 	}
   12298 
   12299 	wm_tbi_serdes_set_linkled(sc);
   12300 }
   12301 
   12302 /* SFP related */
   12303 
   12304 static int
   12305 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12306 {
   12307 	uint32_t i2ccmd;
   12308 	int i;
   12309 
   12310 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12311 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12312 
   12313 	/* Poll the ready bit */
   12314 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12315 		delay(50);
   12316 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12317 		if (i2ccmd & I2CCMD_READY)
   12318 			break;
   12319 	}
   12320 	if ((i2ccmd & I2CCMD_READY) == 0)
   12321 		return -1;
   12322 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12323 		return -1;
   12324 
   12325 	*data = i2ccmd & 0x00ff;
   12326 
   12327 	return 0;
   12328 }
   12329 
   12330 static uint32_t
   12331 wm_sfp_get_media_type(struct wm_softc *sc)
   12332 {
   12333 	uint32_t ctrl_ext;
   12334 	uint8_t val = 0;
   12335 	int timeout = 3;
   12336 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12337 	int rv = -1;
   12338 
   12339 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12340 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12341 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12342 	CSR_WRITE_FLUSH(sc);
   12343 
   12344 	/* Read SFP module data */
   12345 	while (timeout) {
   12346 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12347 		if (rv == 0)
   12348 			break;
   12349 		delay(100*1000); /* XXX too big */
   12350 		timeout--;
   12351 	}
   12352 	if (rv != 0)
   12353 		goto out;
   12354 	switch (val) {
   12355 	case SFF_SFP_ID_SFF:
   12356 		aprint_normal_dev(sc->sc_dev,
   12357 		    "Module/Connector soldered to board\n");
   12358 		break;
   12359 	case SFF_SFP_ID_SFP:
   12360 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12361 		break;
   12362 	case SFF_SFP_ID_UNKNOWN:
   12363 		goto out;
   12364 	default:
   12365 		break;
   12366 	}
   12367 
   12368 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12369 	if (rv != 0) {
   12370 		goto out;
   12371 	}
   12372 
   12373 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12374 		mediatype = WM_MEDIATYPE_SERDES;
   12375 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12376 		sc->sc_flags |= WM_F_SGMII;
   12377 		mediatype = WM_MEDIATYPE_COPPER;
   12378 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12379 		sc->sc_flags |= WM_F_SGMII;
   12380 		mediatype = WM_MEDIATYPE_SERDES;
   12381 	}
   12382 
   12383 out:
   12384 	/* Restore I2C interface setting */
   12385 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12386 
   12387 	return mediatype;
   12388 }
   12389 
   12390 /*
   12391  * NVM related.
   12392  * Microwire, SPI (w/wo EERD) and Flash.
 * Microwire, SPI (with or without EERD) and Flash.
   12394 
   12395 /* Both spi and uwire */
   12396 
   12397 /*
   12398  * wm_eeprom_sendbits:
   12399  *
   12400  *	Send a series of bits to the EEPROM.
   12401  */
   12402 static void
   12403 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12404 {
   12405 	uint32_t reg;
   12406 	int x;
   12407 
   12408 	reg = CSR_READ(sc, WMREG_EECD);
   12409 
   12410 	for (x = nbits; x > 0; x--) {
   12411 		if (bits & (1U << (x - 1)))
   12412 			reg |= EECD_DI;
   12413 		else
   12414 			reg &= ~EECD_DI;
   12415 		CSR_WRITE(sc, WMREG_EECD, reg);
   12416 		CSR_WRITE_FLUSH(sc);
   12417 		delay(2);
   12418 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12419 		CSR_WRITE_FLUSH(sc);
   12420 		delay(2);
   12421 		CSR_WRITE(sc, WMREG_EECD, reg);
   12422 		CSR_WRITE_FLUSH(sc);
   12423 		delay(2);
   12424 	}
   12425 }
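
/*
 * Timing note: each bit above is presented on EECD_DI (most significant
 * bit first) and framed by a 2us-spaced pulse on EECD_SK, so the EEPROM
 * samples DI on the rising edge of SK.  wm_eeprom_recvbits() below is
 * the mirror image: for every bit it raises SK, samples EECD_DO, and
 * lowers SK again.
 */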
   12426 
   12427 /*
   12428  * wm_eeprom_recvbits:
   12429  *
   12430  *	Receive a series of bits from the EEPROM.
   12431  */
   12432 static void
   12433 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12434 {
   12435 	uint32_t reg, val;
   12436 	int x;
   12437 
   12438 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12439 
   12440 	val = 0;
   12441 	for (x = nbits; x > 0; x--) {
   12442 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12443 		CSR_WRITE_FLUSH(sc);
   12444 		delay(2);
   12445 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12446 			val |= (1U << (x - 1));
   12447 		CSR_WRITE(sc, WMREG_EECD, reg);
   12448 		CSR_WRITE_FLUSH(sc);
   12449 		delay(2);
   12450 	}
   12451 	*valp = val;
   12452 }
   12453 
   12454 /* Microwire */
   12455 
   12456 /*
   12457  * wm_nvm_read_uwire:
   12458  *
   12459  *	Read a word from the EEPROM using the MicroWire protocol.
   12460  */
   12461 static int
   12462 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12463 {
   12464 	uint32_t reg, val;
   12465 	int i;
   12466 
   12467 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12468 		device_xname(sc->sc_dev), __func__));
   12469 
   12470 	if (sc->nvm.acquire(sc) != 0)
   12471 		return -1;
   12472 
   12473 	for (i = 0; i < wordcnt; i++) {
   12474 		/* Clear SK and DI. */
   12475 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12476 		CSR_WRITE(sc, WMREG_EECD, reg);
   12477 
   12478 		/*
   12479 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12480 		 * and Xen.
   12481 		 *
   12482 		 * We use this workaround only for 82540 because qemu's
   12483 		 * e1000 act as 82540.
   12484 		 */
   12485 		if (sc->sc_type == WM_T_82540) {
   12486 			reg |= EECD_SK;
   12487 			CSR_WRITE(sc, WMREG_EECD, reg);
   12488 			reg &= ~EECD_SK;
   12489 			CSR_WRITE(sc, WMREG_EECD, reg);
   12490 			CSR_WRITE_FLUSH(sc);
   12491 			delay(2);
   12492 		}
   12493 		/* XXX: end of workaround */
   12494 
   12495 		/* Set CHIP SELECT. */
   12496 		reg |= EECD_CS;
   12497 		CSR_WRITE(sc, WMREG_EECD, reg);
   12498 		CSR_WRITE_FLUSH(sc);
   12499 		delay(2);
   12500 
   12501 		/* Shift in the READ command. */
   12502 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12503 
   12504 		/* Shift in address. */
   12505 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12506 
   12507 		/* Shift out the data. */
   12508 		wm_eeprom_recvbits(sc, &val, 16);
   12509 		data[i] = val & 0xffff;
   12510 
   12511 		/* Clear CHIP SELECT. */
   12512 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12513 		CSR_WRITE(sc, WMREG_EECD, reg);
   12514 		CSR_WRITE_FLUSH(sc);
   12515 		delay(2);
   12516 	}
   12517 
   12518 	sc->nvm.release(sc);
   12519 	return 0;
   12520 }
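
#if 0
/*
 * Usage sketch (not compiled): callers normally go through the
 * wm_nvm_read() dispatcher, which selects uwire/SPI/EERD/flash access
 * as appropriate for the chip.  NVM_OFF_MACADDR is used here as an
 * assumed offset name for the Ethernet address words.
 */
static int
wm_nvm_read_macaddr_example(struct wm_softc *sc, uint16_t ea[3])
{

	/* Three 16-bit words hold the six Ethernet address bytes. */
	return wm_nvm_read(sc, NVM_OFF_MACADDR, 3, ea);
}
#endif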
   12521 
   12522 /* SPI */
   12523 
   12524 /*
   12525  * Set SPI and FLASH related information from the EECD register.
   12526  * For 82541 and 82547, the word size is taken from EEPROM.
   12527  */
   12528 static int
   12529 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12530 {
   12531 	int size;
   12532 	uint32_t reg;
	uint16_t data = 0;
   12534 
   12535 	reg = CSR_READ(sc, WMREG_EECD);
   12536 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12537 
   12538 	/* Read the size of NVM from EECD by default */
   12539 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12540 	switch (sc->sc_type) {
   12541 	case WM_T_82541:
   12542 	case WM_T_82541_2:
   12543 	case WM_T_82547:
   12544 	case WM_T_82547_2:
   12545 		/* Set dummy value to access EEPROM */
   12546 		sc->sc_nvm_wordsize = 64;
   12547 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12548 			aprint_error_dev(sc->sc_dev,
   12549 			    "%s: failed to read EEPROM size\n", __func__);
   12550 		}
   12551 		reg = data;
   12552 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12553 		if (size == 0)
   12554 			size = 6; /* 64 word size */
   12555 		else
   12556 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12557 		break;
   12558 	case WM_T_80003:
   12559 	case WM_T_82571:
   12560 	case WM_T_82572:
   12561 	case WM_T_82573: /* SPI case */
   12562 	case WM_T_82574: /* SPI case */
   12563 	case WM_T_82583: /* SPI case */
   12564 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12565 		if (size > 14)
   12566 			size = 14;
   12567 		break;
   12568 	case WM_T_82575:
   12569 	case WM_T_82576:
   12570 	case WM_T_82580:
   12571 	case WM_T_I350:
   12572 	case WM_T_I354:
   12573 	case WM_T_I210:
   12574 	case WM_T_I211:
   12575 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12576 		if (size > 15)
   12577 			size = 15;
   12578 		break;
   12579 	default:
   12580 		aprint_error_dev(sc->sc_dev,
   12581 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12582 		return -1;
   12584 	}
   12585 
   12586 	sc->sc_nvm_wordsize = 1 << size;
   12587 
   12588 	return 0;
   12589 }
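
/*
 * Worked example for the size computation above, assuming the
 * conventional NVM_WORD_SIZE_BASE_SHIFT of 6: an EECD_EE_SIZE_EX field
 * of 2 on an 82571 gives size = 2 + 6 = 8, so sc_nvm_wordsize becomes
 * 1 << 8 = 256 words (512 bytes).
 */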
   12590 
   12591 /*
   12592  * wm_nvm_ready_spi:
   12593  *
   12594  *	Wait for a SPI EEPROM to be ready for commands.
   12595  */
   12596 static int
   12597 wm_nvm_ready_spi(struct wm_softc *sc)
   12598 {
   12599 	uint32_t val;
   12600 	int usec;
   12601 
   12602 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12603 		device_xname(sc->sc_dev), __func__));
   12604 
   12605 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12606 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12607 		wm_eeprom_recvbits(sc, &val, 8);
   12608 		if ((val & SPI_SR_RDY) == 0)
   12609 			break;
   12610 	}
   12611 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   12613 		return -1;
   12614 	}
   12615 	return 0;
   12616 }
   12617 
   12618 /*
   12619  * wm_nvm_read_spi:
   12620  *
 *	Read a word from the EEPROM using the SPI protocol.
   12622  */
   12623 static int
   12624 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12625 {
   12626 	uint32_t reg, val;
   12627 	int i;
   12628 	uint8_t opc;
   12629 	int rv = 0;
   12630 
   12631 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12632 		device_xname(sc->sc_dev), __func__));
   12633 
   12634 	if (sc->nvm.acquire(sc) != 0)
   12635 		return -1;
   12636 
   12637 	/* Clear SK and CS. */
   12638 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12639 	CSR_WRITE(sc, WMREG_EECD, reg);
   12640 	CSR_WRITE_FLUSH(sc);
   12641 	delay(2);
   12642 
   12643 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12644 		goto out;
   12645 
   12646 	/* Toggle CS to flush commands. */
   12647 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12648 	CSR_WRITE_FLUSH(sc);
   12649 	delay(2);
   12650 	CSR_WRITE(sc, WMREG_EECD, reg);
   12651 	CSR_WRITE_FLUSH(sc);
   12652 	delay(2);
   12653 
   12654 	opc = SPI_OPC_READ;
   12655 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12656 		opc |= SPI_OPC_A8;
   12657 
   12658 	wm_eeprom_sendbits(sc, opc, 8);
   12659 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12660 
   12661 	for (i = 0; i < wordcnt; i++) {
   12662 		wm_eeprom_recvbits(sc, &val, 16);
   12663 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12664 	}
   12665 
   12666 	/* Raise CS and clear SK. */
   12667 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12668 	CSR_WRITE(sc, WMREG_EECD, reg);
   12669 	CSR_WRITE_FLUSH(sc);
   12670 	delay(2);
   12671 
   12672 out:
   12673 	sc->nvm.release(sc);
   12674 	return rv;
   12675 }
   12676 
   12677 /* Using with EERD */
   12678 
   12679 static int
   12680 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12681 {
   12682 	uint32_t attempts = 100000;
   12683 	uint32_t i, reg = 0;
   12684 	int32_t done = -1;
   12685 
   12686 	for (i = 0; i < attempts; i++) {
   12687 		reg = CSR_READ(sc, rw);
   12688 
   12689 		if (reg & EERD_DONE) {
   12690 			done = 0;
   12691 			break;
   12692 		}
   12693 		delay(5);
   12694 	}
   12695 
   12696 	return done;
   12697 }
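
/*
 * Note: wm_poll_eerd_eewr_done() takes the register offset as its 'rw'
 * argument so that the EERD (read) and EEWR (write) registers, which
 * evidently share the DONE bit layout, can use one polling loop.
 */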
   12698 
   12699 static int
   12700 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12701 {
   12702 	int i, eerd = 0;
   12703 	int rv = 0;
   12704 
   12705 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12706 		device_xname(sc->sc_dev), __func__));
   12707 
   12708 	if (sc->nvm.acquire(sc) != 0)
   12709 		return -1;
   12710 
   12711 	for (i = 0; i < wordcnt; i++) {
   12712 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12713 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12714 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12715 		if (rv != 0) {
   12716 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12718 			break;
   12719 		}
   12720 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12721 	}
   12722 
   12723 	sc->nvm.release(sc);
   12724 	return rv;
   12725 }
   12726 
   12727 /* Flash */
   12728 
   12729 static int
   12730 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12731 {
   12732 	uint32_t eecd;
   12733 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12734 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12735 	uint32_t nvm_dword = 0;
   12736 	uint8_t sig_byte = 0;
   12737 	int rv;
   12738 
   12739 	switch (sc->sc_type) {
   12740 	case WM_T_PCH_SPT:
   12741 	case WM_T_PCH_CNP:
   12742 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12743 		act_offset = ICH_NVM_SIG_WORD * 2;
   12744 
   12745 		/* set bank to 0 in case flash read fails. */
   12746 		*bank = 0;
   12747 
   12748 		/* Check bank 0 */
   12749 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12750 		if (rv != 0)
   12751 			return rv;
   12752 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12753 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12754 			*bank = 0;
   12755 			return 0;
   12756 		}
   12757 
   12758 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12762 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12763 			*bank = 1;
   12764 			return 0;
   12765 		}
   12766 		aprint_error_dev(sc->sc_dev,
   12767 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12768 		return -1;
   12769 	case WM_T_ICH8:
   12770 	case WM_T_ICH9:
   12771 		eecd = CSR_READ(sc, WMREG_EECD);
   12772 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12773 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12774 			return 0;
   12775 		}
   12776 		/* FALLTHROUGH */
   12777 	default:
   12778 		/* Default to 0 */
   12779 		*bank = 0;
   12780 
   12781 		/* Check bank 0 */
   12782 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12783 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12784 			*bank = 0;
   12785 			return 0;
   12786 		}
   12787 
   12788 		/* Check bank 1 */
   12789 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12790 		    &sig_byte);
   12791 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12792 			*bank = 1;
   12793 			return 0;
   12794 		}
   12795 	}
   12796 
   12797 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12798 		device_xname(sc->sc_dev)));
   12799 	return -1;
   12800 }
   12801 
   12802 /******************************************************************************
   12803  * This function does initial flash setup so that a new read/write/erase cycle
   12804  * can be started.
   12805  *
   12806  * sc - The pointer to the hw structure
   12807  ****************************************************************************/
   12808 static int32_t
   12809 wm_ich8_cycle_init(struct wm_softc *sc)
   12810 {
   12811 	uint16_t hsfsts;
   12812 	int32_t error = 1;
   12813 	int32_t i     = 0;
   12814 
   12815 	if (sc->sc_type >= WM_T_PCH_SPT)
   12816 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12817 	else
   12818 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12819 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;

	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   12826 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12827 
   12828 	if (sc->sc_type >= WM_T_PCH_SPT)
   12829 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12830 	else
   12831 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12832 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used as an indication of
	 * whether a cycle is in progress or has been completed.  We should
	 * also have some software semaphore mechanism to guard FDONE or
	 * the cycle-in-progress bit so that access to those bits by two
	 * threads is serialized, or some way to ensure two threads don't
	 * start the cycle at the same time.
	 */
   12843 
   12844 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12845 		/*
   12846 		 * There is no cycle running at present, so we can start a
   12847 		 * cycle
   12848 		 */
   12849 
   12850 		/* Begin by setting Flash Cycle Done. */
   12851 		hsfsts |= HSFSTS_DONE;
   12852 		if (sc->sc_type >= WM_T_PCH_SPT)
   12853 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12854 			    hsfsts & 0xffffUL);
   12855 		else
   12856 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12857 		error = 0;
   12858 	} else {
   12859 		/*
		 * Otherwise, poll for some time so that the current cycle
		 * has a chance to end before giving up.
   12862 		 */
   12863 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12864 			if (sc->sc_type >= WM_T_PCH_SPT)
   12865 				hsfsts = ICH8_FLASH_READ32(sc,
   12866 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12867 			else
   12868 				hsfsts = ICH8_FLASH_READ16(sc,
   12869 				    ICH_FLASH_HSFSTS);
   12870 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12871 				error = 0;
   12872 				break;
   12873 			}
   12874 			delay(1);
   12875 		}
   12876 		if (error == 0) {
   12877 			/*
			 * The previous cycle ended before our timeout;
			 * now set the Flash Cycle Done bit.
   12880 			 */
   12881 			hsfsts |= HSFSTS_DONE;
   12882 			if (sc->sc_type >= WM_T_PCH_SPT)
   12883 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12884 				    hsfsts & 0xffffUL);
   12885 			else
   12886 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12887 				    hsfsts);
   12888 		}
   12889 	}
   12890 	return error;
   12891 }
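
/*
 * Access-width note for the cycle helpers here: on PCH_SPT and newer,
 * the flash registers live in LAN memory space and are only accessible
 * 32 bits at a time, with HSFSTS in the low 16 bits and HSFCTL in the
 * high 16 bits of the same dword.  That is why the >= WM_T_PCH_SPT
 * branches read or write 32 bits and mask or shift by 16 accordingly.
 */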
   12892 
   12893 /******************************************************************************
   12894  * This function starts a flash cycle and waits for its completion
   12895  *
   12896  * sc - The pointer to the hw structure
   12897  ****************************************************************************/
   12898 static int32_t
   12899 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12900 {
   12901 	uint16_t hsflctl;
   12902 	uint16_t hsfsts;
   12903 	int32_t error = 1;
   12904 	uint32_t i = 0;
   12905 
    12906 	/* Start a cycle by writing 1 to Flash Cycle Go in HW Flash Control */
   12907 	if (sc->sc_type >= WM_T_PCH_SPT)
   12908 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12909 	else
   12910 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12911 	hsflctl |= HSFCTL_GO;
   12912 	if (sc->sc_type >= WM_T_PCH_SPT)
   12913 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12914 		    (uint32_t)hsflctl << 16);
   12915 	else
   12916 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12917 
    12918 	/* Wait until the FDONE bit is set to 1 */
   12919 	do {
   12920 		if (sc->sc_type >= WM_T_PCH_SPT)
   12921 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12922 			    & 0xffffUL;
   12923 		else
   12924 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12925 		if (hsfsts & HSFSTS_DONE)
   12926 			break;
   12927 		delay(1);
   12928 		i++;
   12929 	} while (i < timeout);
    12930 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12931 		error = 0;
   12932 
   12933 	return error;
   12934 }
   12935 
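/*
 * On PCH_SPT and newer, HSFSTS and HSFCTL are no longer two separate
 * 16-bit flash-mapped registers: both live in a single 32-bit register
 * in LAN memory space, HSFSTS in the low half and HSFCTL in the high
 * half.  A minimal sketch of the packing that the two functions above
 * rely on (illustrative helpers, not part of the driver):
 */
#if 0
static inline uint16_t
spt_hsfsts(uint32_t combined)
{

	/* The low 16 bits hold HSFSTS. */
	return (uint16_t)(combined & 0xffffU);
}

static inline uint16_t
spt_hsflctl(uint32_t combined)
{

	/* The high 16 bits hold HSFCTL. */
	return (uint16_t)(combined >> 16);
}
#endif
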
   12936 /******************************************************************************
   12937  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12938  *
   12939  * sc - The pointer to the hw structure
   12940  * index - The index of the byte or word to read.
    12941  * size - Size of data to read: 1=byte, 2=word, 4=dword
   12942  * data - Pointer to the word to store the value read.
   12943  *****************************************************************************/
   12944 static int32_t
   12945 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12946     uint32_t size, uint32_t *data)
   12947 {
   12948 	uint16_t hsfsts;
   12949 	uint16_t hsflctl;
   12950 	uint32_t flash_linear_address;
   12951 	uint32_t flash_data = 0;
   12952 	int32_t error = 1;
   12953 	int32_t count = 0;
   12954 
    12955 	if (size < 1 || size > 4 || data == NULL ||
   12956 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12957 		return error;
   12958 
   12959 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12960 	    sc->sc_ich8_flash_base;
   12961 
   12962 	do {
   12963 		delay(1);
   12964 		/* Steps */
   12965 		error = wm_ich8_cycle_init(sc);
   12966 		if (error)
   12967 			break;
   12968 
   12969 		if (sc->sc_type >= WM_T_PCH_SPT)
   12970 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12971 			    >> 16;
   12972 		else
   12973 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12974 		/* The byte count field encodes the size minus one (0, 1 or 3). */
   12975 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12976 		    & HSFCTL_BCOUNT_MASK;
   12977 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12978 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12979 			/*
    12980 			 * On SPT, this register is in LAN memory space, not
    12981 			 * flash, so only 32-bit access is supported.
   12982 			 */
   12983 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12984 			    (uint32_t)hsflctl << 16);
   12985 		} else
   12986 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12987 
   12988 		/*
   12989 		 * Write the last 24 bits of index into Flash Linear address
   12990 		 * field in Flash Address
   12991 		 */
    12992 		/* TODO: maybe check the index against the size of the flash */
   12993 
   12994 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12995 
   12996 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12997 
    12998 		/*
    12999 		 * Check whether FCERR is set; if it is, clear it and try
    13000 		 * the whole sequence a few more times.  Otherwise read
    13001 		 * out the contents of Flash Data0, which arrive least
    13002 		 * significant byte first.
    13003 		 */
   13004 		if (error == 0) {
   13005 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13006 			if (size == 1)
   13007 				*data = (uint8_t)(flash_data & 0x000000FF);
   13008 			else if (size == 2)
   13009 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13010 			else if (size == 4)
   13011 				*data = (uint32_t)flash_data;
   13012 			break;
   13013 		} else {
   13014 			/*
   13015 			 * If we've gotten here, then things are probably
   13016 			 * completely hosed, but if the error condition is
   13017 			 * detected, it won't hurt to give it another try...
    13018 			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13019 			 */
   13020 			if (sc->sc_type >= WM_T_PCH_SPT)
   13021 				hsfsts = ICH8_FLASH_READ32(sc,
   13022 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13023 			else
   13024 				hsfsts = ICH8_FLASH_READ16(sc,
   13025 				    ICH_FLASH_HSFSTS);
   13026 
   13027 			if (hsfsts & HSFSTS_ERR) {
   13028 				/* Repeat for some time before giving up. */
   13029 				continue;
   13030 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13031 				break;
   13032 		}
   13033 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13034 
   13035 	return error;
   13036 }
   13037 
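/*
 * The byte count field in HSFCTL encodes the transfer size minus one,
 * so the 1-, 2- and 4-byte accesses used above are written as 0, 1 and
 * 3.  A hedged sketch of that encoding; the shift and mask macros are
 * the ones used above, the helper itself is hypothetical:
 */
#if 0
static inline uint16_t
ich8_bcount_bits(uint32_t size)
{

	/* size must be 1, 2 or 4; the field stores size - 1. */
	return ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
}
#endif
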
   13038 /******************************************************************************
   13039  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13040  *
   13041  * sc - pointer to wm_hw structure
   13042  * index - The index of the byte to read.
   13043  * data - Pointer to a byte to store the value read.
   13044  *****************************************************************************/
   13045 static int32_t
   13046 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13047 {
   13048 	int32_t status;
   13049 	uint32_t word = 0;
   13050 
   13051 	status = wm_read_ich8_data(sc, index, 1, &word);
   13052 	if (status == 0)
   13053 		*data = (uint8_t)word;
   13054 	else
   13055 		*data = 0;
   13056 
   13057 	return status;
   13058 }
   13059 
   13060 /******************************************************************************
   13061  * Reads a word from the NVM using the ICH8 flash access registers.
   13062  *
   13063  * sc - pointer to wm_hw structure
   13064  * index - The starting byte index of the word to read.
   13065  * data - Pointer to a word to store the value read.
   13066  *****************************************************************************/
   13067 static int32_t
   13068 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13069 {
   13070 	int32_t status;
   13071 	uint32_t word = 0;
   13072 
   13073 	status = wm_read_ich8_data(sc, index, 2, &word);
   13074 	if (status == 0)
   13075 		*data = (uint16_t)word;
   13076 	else
   13077 		*data = 0;
   13078 
   13079 	return status;
   13080 }
   13081 
   13082 /******************************************************************************
   13083  * Reads a dword from the NVM using the ICH8 flash access registers.
   13084  *
   13085  * sc - pointer to wm_hw structure
    13086  * index - The starting byte index of the dword to read.
    13087  * data - Pointer to a dword to store the value read.
   13088  *****************************************************************************/
   13089 static int32_t
   13090 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13091 {
   13092 	int32_t status;
   13093 
   13094 	status = wm_read_ich8_data(sc, index, 4, data);
   13095 	return status;
   13096 }
   13097 
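/*
 * Hypothetical usage sketch (not driver code) for the three wrappers
 * above, reading from the start of the mapped flash region.  Each call
 * returns 0 on success:
 */
#if 0
static int
read_ich8_example(struct wm_softc *sc)
{
	uint8_t b;
	uint16_t w;
	uint32_t dw;

	if ((wm_read_ich8_byte(sc, 0, &b) != 0)
	    || (wm_read_ich8_word(sc, 0, &w) != 0)
	    || (wm_read_ich8_dword(sc, 0, &dw) != 0))
		return -1;
	/* b, w and dw now hold the flash contents at offset 0. */
	return 0;
}
#endif
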
   13098 /******************************************************************************
   13099  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13100  * register.
   13101  *
   13102  * sc - Struct containing variables accessed by shared code
   13103  * offset - offset of word in the EEPROM to read
   13104  * data - word read from the EEPROM
   13105  * words - number of words to read
   13106  *****************************************************************************/
   13107 static int
   13108 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13109 {
   13110 	int32_t	 rv = 0;
   13111 	uint32_t flash_bank = 0;
   13112 	uint32_t act_offset = 0;
   13113 	uint32_t bank_offset = 0;
   13114 	uint16_t word = 0;
   13115 	uint16_t i = 0;
   13116 
   13117 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13118 		device_xname(sc->sc_dev), __func__));
   13119 
   13120 	if (sc->nvm.acquire(sc) != 0)
   13121 		return -1;
   13122 
   13123 	/*
   13124 	 * We need to know which is the valid flash bank.  In the event
   13125 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13126 	 * managing flash_bank. So it cannot be trusted and needs
   13127 	 * to be updated with each read.
   13128 	 */
   13129 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13130 	if (rv) {
   13131 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13132 			device_xname(sc->sc_dev)));
   13133 		flash_bank = 0;
   13134 	}
   13135 
   13136 	/*
   13137 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13138 	 * size
   13139 	 */
   13140 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13141 
   13142 	for (i = 0; i < words; i++) {
   13143 		/* The NVM part needs a byte offset, hence * 2 */
   13144 		act_offset = bank_offset + ((offset + i) * 2);
   13145 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13146 		if (rv) {
   13147 			aprint_error_dev(sc->sc_dev,
   13148 			    "%s: failed to read NVM\n", __func__);
   13149 			break;
   13150 		}
   13151 		data[i] = word;
   13152 	}
   13153 
   13154 	sc->nvm.release(sc);
   13155 	return rv;
   13156 }
   13157 
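/*
 * Worked example of the addressing above: banks are
 * sc_ich8_flash_bank_size words (twice that many bytes) apart, and
 * each NVM word takes two bytes.  With a bank size of 0x1000 words,
 * word 3 of bank 1 therefore lives at byte offset
 * 1 * 0x2000 + 3 * 2 = 0x2006.  Illustrative helper only:
 */
#if 0
static uint32_t
nvm_word_byteoff(uint32_t bank, uint32_t bank_size_words, uint32_t word)
{

	return bank * (bank_size_words * 2) + word * 2;
}
#endif
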
   13158 /******************************************************************************
   13159  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13160  * register.
   13161  *
   13162  * sc - Struct containing variables accessed by shared code
   13163  * offset - offset of word in the EEPROM to read
   13164  * data - word read from the EEPROM
   13165  * words - number of words to read
   13166  *****************************************************************************/
   13167 static int
   13168 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13169 {
   13170 	int32_t	 rv = 0;
   13171 	uint32_t flash_bank = 0;
   13172 	uint32_t act_offset = 0;
   13173 	uint32_t bank_offset = 0;
   13174 	uint32_t dword = 0;
   13175 	uint16_t i = 0;
   13176 
   13177 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13178 		device_xname(sc->sc_dev), __func__));
   13179 
   13180 	if (sc->nvm.acquire(sc) != 0)
   13181 		return -1;
   13182 
   13183 	/*
   13184 	 * We need to know which is the valid flash bank.  In the event
   13185 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13186 	 * managing flash_bank. So it cannot be trusted and needs
   13187 	 * to be updated with each read.
   13188 	 */
   13189 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13190 	if (rv) {
   13191 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13192 			device_xname(sc->sc_dev)));
   13193 		flash_bank = 0;
   13194 	}
   13195 
   13196 	/*
   13197 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13198 	 * size
   13199 	 */
   13200 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13201 
   13202 	for (i = 0; i < words; i++) {
   13203 		/* The NVM part needs a byte offset, hence * 2 */
   13204 		act_offset = bank_offset + ((offset + i) * 2);
   13205 		/* but we must read dword aligned, so mask ... */
   13206 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13207 		if (rv) {
   13208 			aprint_error_dev(sc->sc_dev,
   13209 			    "%s: failed to read NVM\n", __func__);
   13210 			break;
   13211 		}
   13212 		/* ... and pick out low or high word */
   13213 		if ((act_offset & 0x2) == 0)
   13214 			data[i] = (uint16_t)(dword & 0xFFFF);
   13215 		else
   13216 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13217 	}
   13218 
   13219 	sc->nvm.release(sc);
   13220 	return rv;
   13221 }
   13222 
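/*
 * A standalone sketch of the aligned-read trick above: round the byte
 * offset down to a dword boundary, read the whole dword, then pick the
 * low or high 16-bit half depending on bit 1 of the offset.  The dword
 * argument stands in for the value wm_read_ich8_dword() fetched:
 */
#if 0
static uint16_t
spt_pick_word(uint32_t byte_off, uint32_t dword)
{

	/* Byte offsets 0 and 1 within the dword are the low word. */
	if ((byte_off & 0x2) == 0)
		return (uint16_t)(dword & 0xFFFF);
	return (uint16_t)((dword >> 16) & 0xFFFF);
}
#endif
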
   13223 /* iNVM */
   13224 
   13225 static int
   13226 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13227 {
    13228 	int32_t	 rv = -1;	/* Return an error if the word is not found */
   13229 	uint32_t invm_dword;
   13230 	uint16_t i;
   13231 	uint8_t record_type, word_address;
   13232 
   13233 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13234 		device_xname(sc->sc_dev), __func__));
   13235 
   13236 	for (i = 0; i < INVM_SIZE; i++) {
   13237 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13238 		/* Get record type */
   13239 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13240 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13241 			break;
   13242 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13243 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13244 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13245 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13246 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13247 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13248 			if (word_address == address) {
   13249 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13250 				rv = 0;
   13251 				break;
   13252 			}
   13253 		}
   13254 	}
   13255 
   13256 	return rv;
   13257 }
   13258 
   13259 static int
   13260 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13261 {
   13262 	int rv = 0;
   13263 	int i;
   13264 
   13265 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13266 		device_xname(sc->sc_dev), __func__));
   13267 
   13268 	if (sc->nvm.acquire(sc) != 0)
   13269 		return -1;
   13270 
   13271 	for (i = 0; i < words; i++) {
   13272 		switch (offset + i) {
   13273 		case NVM_OFF_MACADDR:
   13274 		case NVM_OFF_MACADDR1:
   13275 		case NVM_OFF_MACADDR2:
   13276 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13277 			if (rv != 0) {
   13278 				data[i] = 0xffff;
   13279 				rv = -1;
   13280 			}
   13281 			break;
   13282 		case NVM_OFF_CFG2:
   13283 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13284 			if (rv != 0) {
   13285 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13286 				rv = 0;
   13287 			}
   13288 			break;
   13289 		case NVM_OFF_CFG4:
   13290 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13291 			if (rv != 0) {
   13292 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13293 				rv = 0;
   13294 			}
   13295 			break;
   13296 		case NVM_OFF_LED_1_CFG:
   13297 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13298 			if (rv != 0) {
   13299 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13300 				rv = 0;
   13301 			}
   13302 			break;
   13303 		case NVM_OFF_LED_0_2_CFG:
   13304 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13305 			if (rv != 0) {
   13306 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13307 				rv = 0;
   13308 			}
   13309 			break;
   13310 		case NVM_OFF_ID_LED_SETTINGS:
   13311 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13312 			if (rv != 0) {
   13313 				*data = ID_LED_RESERVED_FFFF;
   13314 				rv = 0;
   13315 			}
   13316 			break;
   13317 		default:
   13318 			DPRINTF(WM_DEBUG_NVM,
   13319 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13320 			*data = NVM_RESERVED_WORD;
   13321 			break;
   13322 		}
   13323 	}
   13324 
   13325 	sc->nvm.release(sc);
   13326 	return rv;
   13327 }
   13328 
    13329 /* Locking, NVM type detection, checksum validation, version and read */
   13330 
   13331 static int
   13332 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13333 {
   13334 	uint32_t eecd = 0;
   13335 
   13336 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13337 	    || sc->sc_type == WM_T_82583) {
   13338 		eecd = CSR_READ(sc, WMREG_EECD);
   13339 
   13340 		/* Isolate bits 15 & 16 */
   13341 		eecd = ((eecd >> 15) & 0x03);
   13342 
   13343 		/* If both bits are set, device is Flash type */
   13344 		if (eecd == 0x03)
   13345 			return 0;
   13346 	}
   13347 	return 1;
   13348 }
   13349 
   13350 static int
   13351 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13352 {
   13353 	uint32_t eec;
   13354 
   13355 	eec = CSR_READ(sc, WMREG_EEC);
   13356 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13357 		return 1;
   13358 
   13359 	return 0;
   13360 }
   13361 
   13362 /*
   13363  * wm_nvm_validate_checksum
   13364  *
   13365  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13366  */
   13367 static int
   13368 wm_nvm_validate_checksum(struct wm_softc *sc)
   13369 {
   13370 	uint16_t checksum;
   13371 	uint16_t eeprom_data;
   13372 #ifdef WM_DEBUG
   13373 	uint16_t csum_wordaddr, valid_checksum;
   13374 #endif
   13375 	int i;
   13376 
   13377 	checksum = 0;
   13378 
   13379 	/* Don't check for I211 */
   13380 	if (sc->sc_type == WM_T_I211)
   13381 		return 0;
   13382 
   13383 #ifdef WM_DEBUG
   13384 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13385 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13386 		csum_wordaddr = NVM_OFF_COMPAT;
   13387 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13388 	} else {
   13389 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13390 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13391 	}
   13392 
   13393 	/* Dump EEPROM image for debug */
   13394 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13395 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13396 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13397 		/* XXX PCH_SPT? */
   13398 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13399 		if ((eeprom_data & valid_checksum) == 0)
   13400 			DPRINTF(WM_DEBUG_NVM,
   13401 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13402 				device_xname(sc->sc_dev), eeprom_data,
   13403 				    valid_checksum));
   13404 	}
   13405 
   13406 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13407 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13408 		for (i = 0; i < NVM_SIZE; i++) {
   13409 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13410 				printf("XXXX ");
   13411 			else
   13412 				printf("%04hx ", eeprom_data);
   13413 			if (i % 8 == 7)
   13414 				printf("\n");
   13415 		}
   13416 	}
   13417 
   13418 #endif /* WM_DEBUG */
   13419 
   13420 	for (i = 0; i < NVM_SIZE; i++) {
   13421 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13422 			return 1;
   13423 		checksum += eeprom_data;
   13424 	}
   13425 
   13426 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13427 #ifdef WM_DEBUG
   13428 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13429 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13430 #endif
   13431 	}
   13432 
   13433 	return 0;
   13434 }
   13435 
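/*
 * Self-contained illustration of the checksum rule above: the 16-bit
 * sum of the first NVM_SIZE (64) words must equal NVM_CHECKSUM (0xBABA
 * on these devices), so the vendor stores a compensating value in one
 * of the words.  Hedged sketch, not driver code:
 */
#if 0
static int
nvm_image_checksum_ok(const uint16_t *words, int nwords)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	return sum == 0xBABA;	/* NVM_CHECKSUM */
}
#endif
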
   13436 static void
   13437 wm_nvm_version_invm(struct wm_softc *sc)
   13438 {
   13439 	uint32_t dword;
   13440 
   13441 	/*
    13442 	 * Linux's code to decode the version is very strange, so we don't
    13443 	 * follow that algorithm and just use word 61 as the documentation
    13444 	 * describes.  Perhaps it's not perfect, though...
   13445 	 *
   13446 	 * Example:
   13447 	 *
   13448 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13449 	 */
   13450 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13451 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13452 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13453 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13454 }
   13455 
   13456 static void
   13457 wm_nvm_version(struct wm_softc *sc)
   13458 {
   13459 	uint16_t major, minor, build, patch;
   13460 	uint16_t uid0, uid1;
   13461 	uint16_t nvm_data;
   13462 	uint16_t off;
   13463 	bool check_version = false;
   13464 	bool check_optionrom = false;
   13465 	bool have_build = false;
   13466 	bool have_uid = true;
   13467 
   13468 	/*
   13469 	 * Version format:
   13470 	 *
   13471 	 * XYYZ
   13472 	 * X0YZ
   13473 	 * X0YY
   13474 	 *
   13475 	 * Example:
   13476 	 *
   13477 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13478 	 *	82571	0x50a6	5.10.6?
   13479 	 *	82572	0x506a	5.6.10?
   13480 	 *	82572EI	0x5069	5.6.9?
   13481 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13482 	 *		0x2013	2.1.3?
    13483 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   13484 	 */
   13485 
   13486 	/*
   13487 	 * XXX
    13488 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13489 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13490 	 */
   13491 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13492 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13493 		have_uid = false;
   13494 
   13495 	switch (sc->sc_type) {
   13496 	case WM_T_82571:
   13497 	case WM_T_82572:
   13498 	case WM_T_82574:
   13499 	case WM_T_82583:
   13500 		check_version = true;
   13501 		check_optionrom = true;
   13502 		have_build = true;
   13503 		break;
   13504 	case WM_T_82575:
   13505 	case WM_T_82576:
   13506 	case WM_T_82580:
   13507 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13508 			check_version = true;
   13509 		break;
   13510 	case WM_T_I211:
   13511 		wm_nvm_version_invm(sc);
   13512 		have_uid = false;
   13513 		goto printver;
   13514 	case WM_T_I210:
   13515 		if (!wm_nvm_flash_presence_i210(sc)) {
   13516 			wm_nvm_version_invm(sc);
   13517 			have_uid = false;
   13518 			goto printver;
   13519 		}
   13520 		/* FALLTHROUGH */
   13521 	case WM_T_I350:
   13522 	case WM_T_I354:
   13523 		check_version = true;
   13524 		check_optionrom = true;
   13525 		break;
   13526 	default:
   13527 		return;
   13528 	}
   13529 	if (check_version
   13530 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13531 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13532 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13533 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13534 			build = nvm_data & NVM_BUILD_MASK;
   13535 			have_build = true;
   13536 		} else
   13537 			minor = nvm_data & 0x00ff;
   13538 
    13539 		/* The minor field is BCD; convert it to decimal */
   13540 		minor = (minor / 16) * 10 + (minor % 16);
   13541 		sc->sc_nvm_ver_major = major;
   13542 		sc->sc_nvm_ver_minor = minor;
   13543 
   13544 printver:
   13545 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13546 		    sc->sc_nvm_ver_minor);
   13547 		if (have_build) {
   13548 			sc->sc_nvm_ver_build = build;
   13549 			aprint_verbose(".%d", build);
   13550 		}
   13551 	}
   13552 
    13553 	/* Assume the Option ROM area is above NVM_SIZE */
   13554 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13555 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13556 		/* Option ROM Version */
   13557 		if ((off != 0x0000) && (off != 0xffff)) {
   13558 			int rv;
   13559 
   13560 			off += NVM_COMBO_VER_OFF;
   13561 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13562 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13563 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13564 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13565 				/* 16bits */
   13566 				major = uid0 >> 8;
   13567 				build = (uid0 << 8) | (uid1 >> 8);
   13568 				patch = uid1 & 0x00ff;
   13569 				aprint_verbose(", option ROM Version %d.%d.%d",
   13570 				    major, build, patch);
   13571 			}
   13572 		}
   13573 	}
   13574 
   13575 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13576 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13577 }
   13578 
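/*
 * Worked example of the XYYZ decode above, with field boundaries
 * inferred from the examples in the table: 0x50a2 splits into major
 * 0x5, minor 0x0a and build 0x2; the minor byte's hex digits are read
 * as decimal digits, so 0x0a becomes 10 and the result prints as
 * 5.10.2.  Illustrative helper only:
 */
#if 0
static void
nvm_decode_xyyz(uint16_t nvm_data, int *major, int *minor, int *build)
{
	int m = (nvm_data >> 4) & 0xff;		/* middle byte: minor */

	*major = (nvm_data >> 12) & 0xf;
	*minor = (m / 16) * 10 + (m % 16);	/* hex digits as decimal */
	*build = nvm_data & 0xf;
}
#endif
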
   13579 /*
   13580  * wm_nvm_read:
   13581  *
   13582  *	Read data from the serial EEPROM.
   13583  */
   13584 static int
   13585 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13586 {
   13587 	int rv;
   13588 
   13589 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13590 		device_xname(sc->sc_dev), __func__));
   13591 
   13592 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13593 		return -1;
   13594 
   13595 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13596 
   13597 	return rv;
   13598 }
   13599 
   13600 /*
   13601  * Hardware semaphores.
    13602  * Very complex...
   13603  */
   13604 
   13605 static int
   13606 wm_get_null(struct wm_softc *sc)
   13607 {
   13608 
   13609 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13610 		device_xname(sc->sc_dev), __func__));
   13611 	return 0;
   13612 }
   13613 
   13614 static void
   13615 wm_put_null(struct wm_softc *sc)
   13616 {
   13617 
   13618 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13619 		device_xname(sc->sc_dev), __func__));
   13620 	return;
   13621 }
   13622 
   13623 static int
   13624 wm_get_eecd(struct wm_softc *sc)
   13625 {
   13626 	uint32_t reg;
   13627 	int x;
   13628 
   13629 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13630 		device_xname(sc->sc_dev), __func__));
   13631 
   13632 	reg = CSR_READ(sc, WMREG_EECD);
   13633 
   13634 	/* Request EEPROM access. */
   13635 	reg |= EECD_EE_REQ;
   13636 	CSR_WRITE(sc, WMREG_EECD, reg);
   13637 
    13638 	/* ... and wait for it to be granted. */
   13639 	for (x = 0; x < 1000; x++) {
   13640 		reg = CSR_READ(sc, WMREG_EECD);
   13641 		if (reg & EECD_EE_GNT)
   13642 			break;
   13643 		delay(5);
   13644 	}
   13645 	if ((reg & EECD_EE_GNT) == 0) {
   13646 		aprint_error_dev(sc->sc_dev,
   13647 		    "could not acquire EEPROM GNT\n");
   13648 		reg &= ~EECD_EE_REQ;
   13649 		CSR_WRITE(sc, WMREG_EECD, reg);
   13650 		return -1;
   13651 	}
   13652 
   13653 	return 0;
   13654 }
   13655 
   13656 static void
   13657 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13658 {
   13659 
   13660 	*eecd |= EECD_SK;
   13661 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13662 	CSR_WRITE_FLUSH(sc);
   13663 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13664 		delay(1);
   13665 	else
   13666 		delay(50);
   13667 }
   13668 
   13669 static void
   13670 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13671 {
   13672 
   13673 	*eecd &= ~EECD_SK;
   13674 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13675 	CSR_WRITE_FLUSH(sc);
   13676 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13677 		delay(1);
   13678 	else
   13679 		delay(50);
   13680 }
   13681 
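/*
 * One full bit-bang clock period is a raise followed by a lower; the
 * half-period delays above are 1us for SPI parts and 50us for
 * Microwire parts.  Hypothetical usage, inside a function that has
 * already acquired the EEPROM:
 */
#if 0
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	wm_nvm_eec_clock_raise(sc, &eecd);	/* SK high for half a period */
	wm_nvm_eec_clock_lower(sc, &eecd);	/* SK low: one full period */
#endif
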
   13682 static void
   13683 wm_put_eecd(struct wm_softc *sc)
   13684 {
   13685 	uint32_t reg;
   13686 
   13687 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13688 		device_xname(sc->sc_dev), __func__));
   13689 
   13690 	/* Stop nvm */
   13691 	reg = CSR_READ(sc, WMREG_EECD);
   13692 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13693 		/* Pull CS high */
   13694 		reg |= EECD_CS;
   13695 		wm_nvm_eec_clock_lower(sc, &reg);
   13696 	} else {
   13697 		/* CS on Microwire is active-high */
   13698 		reg &= ~(EECD_CS | EECD_DI);
   13699 		CSR_WRITE(sc, WMREG_EECD, reg);
   13700 		wm_nvm_eec_clock_raise(sc, &reg);
   13701 		wm_nvm_eec_clock_lower(sc, &reg);
   13702 	}
   13703 
   13704 	reg = CSR_READ(sc, WMREG_EECD);
   13705 	reg &= ~EECD_EE_REQ;
   13706 	CSR_WRITE(sc, WMREG_EECD, reg);
   13707 
   13708 	return;
   13709 }
   13710 
   13711 /*
   13712  * Get hardware semaphore.
   13713  * Same as e1000_get_hw_semaphore_generic()
   13714  */
   13715 static int
   13716 wm_get_swsm_semaphore(struct wm_softc *sc)
   13717 {
   13718 	int32_t timeout;
   13719 	uint32_t swsm;
   13720 
   13721 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13722 		device_xname(sc->sc_dev), __func__));
   13723 	KASSERT(sc->sc_nvm_wordsize > 0);
   13724 
   13725 retry:
   13726 	/* Get the SW semaphore. */
   13727 	timeout = sc->sc_nvm_wordsize + 1;
   13728 	while (timeout) {
   13729 		swsm = CSR_READ(sc, WMREG_SWSM);
   13730 
   13731 		if ((swsm & SWSM_SMBI) == 0)
   13732 			break;
   13733 
   13734 		delay(50);
   13735 		timeout--;
   13736 	}
   13737 
   13738 	if (timeout == 0) {
   13739 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13740 			/*
   13741 			 * In rare circumstances, the SW semaphore may already
   13742 			 * be held unintentionally. Clear the semaphore once
   13743 			 * before giving up.
   13744 			 */
   13745 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13746 			wm_put_swsm_semaphore(sc);
   13747 			goto retry;
   13748 		}
   13749 		aprint_error_dev(sc->sc_dev,
   13750 		    "could not acquire SWSM SMBI\n");
   13751 		return 1;
   13752 	}
   13753 
   13754 	/* Get the FW semaphore. */
   13755 	timeout = sc->sc_nvm_wordsize + 1;
   13756 	while (timeout) {
   13757 		swsm = CSR_READ(sc, WMREG_SWSM);
   13758 		swsm |= SWSM_SWESMBI;
   13759 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13760 		/* If we managed to set the bit we got the semaphore. */
   13761 		swsm = CSR_READ(sc, WMREG_SWSM);
   13762 		if (swsm & SWSM_SWESMBI)
   13763 			break;
   13764 
   13765 		delay(50);
   13766 		timeout--;
   13767 	}
   13768 
   13769 	if (timeout == 0) {
   13770 		aprint_error_dev(sc->sc_dev,
   13771 		    "could not acquire SWSM SWESMBI\n");
   13772 		/* Release semaphores */
   13773 		wm_put_swsm_semaphore(sc);
   13774 		return 1;
   13775 	}
   13776 	return 0;
   13777 }
   13778 
   13779 /*
   13780  * Put hardware semaphore.
   13781  * Same as e1000_put_hw_semaphore_generic()
   13782  */
   13783 static void
   13784 wm_put_swsm_semaphore(struct wm_softc *sc)
   13785 {
   13786 	uint32_t swsm;
   13787 
   13788 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13789 		device_xname(sc->sc_dev), __func__));
   13790 
   13791 	swsm = CSR_READ(sc, WMREG_SWSM);
   13792 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13793 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13794 }
   13795 
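/*
 * The SWSM semaphore is taken in two stages: SMBI arbitrates among
 * software agents, then SWESMBI arbitrates software against firmware.
 * Hypothetical usage sketch; the driver always pairs get with put:
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;	/* both stages must succeed */
	/* ... access the shared NVM/PHY resource here ... */
	wm_put_swsm_semaphore(sc);
#endif
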
   13796 /*
   13797  * Get SW/FW semaphore.
   13798  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13799  */
   13800 static int
   13801 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13802 {
   13803 	uint32_t swfw_sync;
   13804 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13805 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13806 	int timeout;
   13807 
   13808 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13809 		device_xname(sc->sc_dev), __func__));
   13810 
   13811 	if (sc->sc_type == WM_T_80003)
   13812 		timeout = 50;
   13813 	else
   13814 		timeout = 200;
   13815 
   13816 	while (timeout) {
   13817 		if (wm_get_swsm_semaphore(sc)) {
   13818 			aprint_error_dev(sc->sc_dev,
   13819 			    "%s: failed to get semaphore\n",
   13820 			    __func__);
   13821 			return 1;
   13822 		}
   13823 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13824 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13825 			swfw_sync |= swmask;
   13826 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13827 			wm_put_swsm_semaphore(sc);
   13828 			return 0;
   13829 		}
   13830 		wm_put_swsm_semaphore(sc);
   13831 		delay(5000);
   13832 		timeout--;
   13833 	}
   13834 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13835 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13836 	return 1;
   13837 }
   13838 
   13839 static void
   13840 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13841 {
   13842 	uint32_t swfw_sync;
   13843 
   13844 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13845 		device_xname(sc->sc_dev), __func__));
   13846 
   13847 	while (wm_get_swsm_semaphore(sc) != 0)
   13848 		continue;
   13849 
   13850 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13851 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13852 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13853 
   13854 	wm_put_swsm_semaphore(sc);
   13855 }
   13856 
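/*
 * SW_FW_SYNC carries one software bit and one firmware bit per shared
 * resource; the same resource mask is shifted by SWFW_SOFT_SHIFT or
 * SWFW_FIRM_SHIFT to address either half.  Sketch of the availability
 * test used above (inside a function, with the SWSM semaphore held):
 */
#if 0
	uint32_t swmask = SWFW_EEP_SM << SWFW_SOFT_SHIFT;
	uint32_t fwmask = SWFW_EEP_SM << SWFW_FIRM_SHIFT;
	uint32_t swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	bool available = (swfw_sync & (swmask | fwmask)) == 0;
#endif
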
   13857 static int
   13858 wm_get_nvm_80003(struct wm_softc *sc)
   13859 {
   13860 	int rv;
   13861 
   13862 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13863 		device_xname(sc->sc_dev), __func__));
   13864 
   13865 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13866 		aprint_error_dev(sc->sc_dev,
   13867 		    "%s: failed to get semaphore(SWFW)\n",
   13868 		    __func__);
   13869 		return rv;
   13870 	}
   13871 
   13872 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13873 	    && (rv = wm_get_eecd(sc)) != 0) {
   13874 		aprint_error_dev(sc->sc_dev,
   13875 		    "%s: failed to get semaphore(EECD)\n",
   13876 		    __func__);
   13877 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13878 		return rv;
   13879 	}
   13880 
   13881 	return 0;
   13882 }
   13883 
   13884 static void
   13885 wm_put_nvm_80003(struct wm_softc *sc)
   13886 {
   13887 
   13888 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13889 		device_xname(sc->sc_dev), __func__));
   13890 
   13891 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13892 		wm_put_eecd(sc);
   13893 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13894 }
   13895 
   13896 static int
   13897 wm_get_nvm_82571(struct wm_softc *sc)
   13898 {
   13899 	int rv;
   13900 
   13901 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13902 		device_xname(sc->sc_dev), __func__));
   13903 
   13904 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13905 		return rv;
   13906 
   13907 	switch (sc->sc_type) {
   13908 	case WM_T_82573:
   13909 		break;
   13910 	default:
   13911 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13912 			rv = wm_get_eecd(sc);
   13913 		break;
   13914 	}
   13915 
   13916 	if (rv != 0) {
   13917 		aprint_error_dev(sc->sc_dev,
   13918 		    "%s: failed to get semaphore\n",
   13919 		    __func__);
   13920 		wm_put_swsm_semaphore(sc);
   13921 	}
   13922 
   13923 	return rv;
   13924 }
   13925 
   13926 static void
   13927 wm_put_nvm_82571(struct wm_softc *sc)
   13928 {
   13929 
   13930 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13931 		device_xname(sc->sc_dev), __func__));
   13932 
   13933 	switch (sc->sc_type) {
   13934 	case WM_T_82573:
   13935 		break;
   13936 	default:
   13937 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13938 			wm_put_eecd(sc);
   13939 		break;
   13940 	}
   13941 
   13942 	wm_put_swsm_semaphore(sc);
   13943 }
   13944 
   13945 static int
   13946 wm_get_phy_82575(struct wm_softc *sc)
   13947 {
   13948 
   13949 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13950 		device_xname(sc->sc_dev), __func__));
   13951 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13952 }
   13953 
   13954 static void
   13955 wm_put_phy_82575(struct wm_softc *sc)
   13956 {
   13957 
   13958 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13959 		device_xname(sc->sc_dev), __func__));
   13960 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13961 }
   13962 
   13963 static int
   13964 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13965 {
   13966 	uint32_t ext_ctrl;
   13967 	int timeout = 200;
   13968 
   13969 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13970 		device_xname(sc->sc_dev), __func__));
   13971 
   13972 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13973 	for (timeout = 0; timeout < 200; timeout++) {
   13974 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13975 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13976 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13977 
   13978 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13979 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13980 			return 0;
   13981 		delay(5000);
   13982 	}
   13983 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13984 	    device_xname(sc->sc_dev), ext_ctrl);
   13985 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13986 	return 1;
   13987 }
   13988 
   13989 static void
   13990 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13991 {
   13992 	uint32_t ext_ctrl;
   13993 
   13994 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13995 		device_xname(sc->sc_dev), __func__));
   13996 
   13997 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13998 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13999 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14000 
   14001 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14002 }
   14003 
   14004 static int
   14005 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14006 {
   14007 	uint32_t ext_ctrl;
   14008 	int timeout;
   14009 
   14010 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14011 		device_xname(sc->sc_dev), __func__));
   14012 	mutex_enter(sc->sc_ich_phymtx);
   14013 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14014 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14015 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14016 			break;
   14017 		delay(1000);
   14018 	}
   14019 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14020 		printf("%s: SW has already locked the resource\n",
   14021 		    device_xname(sc->sc_dev));
   14022 		goto out;
   14023 	}
   14024 
   14025 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14026 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14027 	for (timeout = 0; timeout < 1000; timeout++) {
   14028 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14029 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14030 			break;
   14031 		delay(1000);
   14032 	}
   14033 	if (timeout >= 1000) {
   14034 		printf("%s: failed to acquire semaphore\n",
   14035 		    device_xname(sc->sc_dev));
   14036 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14037 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14038 		goto out;
   14039 	}
   14040 	return 0;
   14041 
   14042 out:
   14043 	mutex_exit(sc->sc_ich_phymtx);
   14044 	return 1;
   14045 }
   14046 
   14047 static void
   14048 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14049 {
   14050 	uint32_t ext_ctrl;
   14051 
   14052 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14053 		device_xname(sc->sc_dev), __func__));
   14054 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14055 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14056 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14057 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14058 	} else {
   14059 		printf("%s: Semaphore unexpectedly released\n",
   14060 		    device_xname(sc->sc_dev));
   14061 	}
   14062 
   14063 	mutex_exit(sc->sc_ich_phymtx);
   14064 }
   14065 
   14066 static int
   14067 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14068 {
   14069 
   14070 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14071 		device_xname(sc->sc_dev), __func__));
   14072 	mutex_enter(sc->sc_ich_nvmmtx);
   14073 
   14074 	return 0;
   14075 }
   14076 
   14077 static void
   14078 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14079 {
   14080 
   14081 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14082 		device_xname(sc->sc_dev), __func__));
   14083 	mutex_exit(sc->sc_ich_nvmmtx);
   14084 }
   14085 
   14086 static int
   14087 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14088 {
   14089 	int i = 0;
   14090 	uint32_t reg;
   14091 
   14092 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14093 		device_xname(sc->sc_dev), __func__));
   14094 
   14095 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14096 	do {
   14097 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14098 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14099 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14100 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14101 			break;
   14102 		delay(2*1000);
   14103 		i++;
   14104 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14105 
   14106 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14107 		wm_put_hw_semaphore_82573(sc);
   14108 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14109 		    device_xname(sc->sc_dev));
   14110 		return -1;
   14111 	}
   14112 
   14113 	return 0;
   14114 }
   14115 
   14116 static void
   14117 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14118 {
   14119 	uint32_t reg;
   14120 
   14121 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14122 		device_xname(sc->sc_dev), __func__));
   14123 
   14124 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14125 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14126 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14127 }
   14128 
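/*
 * All three EXTCNFCTR-based locks above use the same write-and-readback
 * handshake: set MDIO_SW_OWNERSHIP, read the register back, and the
 * lock is held only if the bit stuck.  Sketch of a single attempt
 * (retry loops elided; inside a function):
 */
#if 0
	uint32_t ec = CSR_READ(sc, WMREG_EXTCNFCTR);

	CSR_WRITE(sc, WMREG_EXTCNFCTR, ec | EXTCNFCTR_MDIO_SW_OWNERSHIP);
	ec = CSR_READ(sc, WMREG_EXTCNFCTR);
	if ((ec & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) {
		/* Lock acquired. */
	}
#endif
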
   14129 /*
   14130  * Management mode and power management related subroutines.
   14131  * BMC, AMT, suspend/resume and EEE.
   14132  */
   14133 
   14134 #ifdef WM_WOL
   14135 static int
   14136 wm_check_mng_mode(struct wm_softc *sc)
   14137 {
   14138 	int rv;
   14139 
   14140 	switch (sc->sc_type) {
   14141 	case WM_T_ICH8:
   14142 	case WM_T_ICH9:
   14143 	case WM_T_ICH10:
   14144 	case WM_T_PCH:
   14145 	case WM_T_PCH2:
   14146 	case WM_T_PCH_LPT:
   14147 	case WM_T_PCH_SPT:
   14148 	case WM_T_PCH_CNP:
   14149 		rv = wm_check_mng_mode_ich8lan(sc);
   14150 		break;
   14151 	case WM_T_82574:
   14152 	case WM_T_82583:
   14153 		rv = wm_check_mng_mode_82574(sc);
   14154 		break;
   14155 	case WM_T_82571:
   14156 	case WM_T_82572:
   14157 	case WM_T_82573:
   14158 	case WM_T_80003:
   14159 		rv = wm_check_mng_mode_generic(sc);
   14160 		break;
   14161 	default:
    14162 		/* nothing to do */
   14163 		rv = 0;
   14164 		break;
   14165 	}
   14166 
   14167 	return rv;
   14168 }
   14169 
   14170 static int
   14171 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14172 {
   14173 	uint32_t fwsm;
   14174 
   14175 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14176 
   14177 	if (((fwsm & FWSM_FW_VALID) != 0)
   14178 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14179 		return 1;
   14180 
   14181 	return 0;
   14182 }
   14183 
   14184 static int
   14185 wm_check_mng_mode_82574(struct wm_softc *sc)
   14186 {
   14187 	uint16_t data;
   14188 
   14189 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14190 
   14191 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14192 		return 1;
   14193 
   14194 	return 0;
   14195 }
   14196 
   14197 static int
   14198 wm_check_mng_mode_generic(struct wm_softc *sc)
   14199 {
   14200 	uint32_t fwsm;
   14201 
   14202 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14203 
   14204 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14205 		return 1;
   14206 
   14207 	return 0;
   14208 }
   14209 #endif /* WM_WOL */
   14210 
   14211 static int
   14212 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14213 {
   14214 	uint32_t manc, fwsm, factps;
   14215 
   14216 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14217 		return 0;
   14218 
   14219 	manc = CSR_READ(sc, WMREG_MANC);
   14220 
   14221 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14222 		device_xname(sc->sc_dev), manc));
   14223 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14224 		return 0;
   14225 
   14226 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14227 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14228 		factps = CSR_READ(sc, WMREG_FACTPS);
   14229 		if (((factps & FACTPS_MNGCG) == 0)
   14230 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14231 			return 1;
    14232 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14233 		uint16_t data;
   14234 
   14235 		factps = CSR_READ(sc, WMREG_FACTPS);
   14236 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14237 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14238 			device_xname(sc->sc_dev), factps, data));
   14239 		if (((factps & FACTPS_MNGCG) == 0)
   14240 		    && ((data & NVM_CFG2_MNGM_MASK)
   14241 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14242 			return 1;
   14243 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14244 	    && ((manc & MANC_ASF_EN) == 0))
   14245 		return 1;
   14246 
   14247 	return 0;
   14248 }
   14249 
   14250 static bool
   14251 wm_phy_resetisblocked(struct wm_softc *sc)
   14252 {
   14253 	bool blocked = false;
   14254 	uint32_t reg;
   14255 	int i = 0;
   14256 
   14257 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14258 		device_xname(sc->sc_dev), __func__));
   14259 
   14260 	switch (sc->sc_type) {
   14261 	case WM_T_ICH8:
   14262 	case WM_T_ICH9:
   14263 	case WM_T_ICH10:
   14264 	case WM_T_PCH:
   14265 	case WM_T_PCH2:
   14266 	case WM_T_PCH_LPT:
   14267 	case WM_T_PCH_SPT:
   14268 	case WM_T_PCH_CNP:
   14269 		do {
   14270 			reg = CSR_READ(sc, WMREG_FWSM);
   14271 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14272 				blocked = true;
   14273 				delay(10*1000);
   14274 				continue;
   14275 			}
   14276 			blocked = false;
   14277 		} while (blocked && (i++ < 30));
   14278 		return blocked;
   14279 		break;
   14280 	case WM_T_82571:
   14281 	case WM_T_82572:
   14282 	case WM_T_82573:
   14283 	case WM_T_82574:
   14284 	case WM_T_82583:
   14285 	case WM_T_80003:
   14286 		reg = CSR_READ(sc, WMREG_MANC);
   14287 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14288 			return true;
   14289 		else
   14290 			return false;
   14291 		break;
   14292 	default:
   14293 		/* no problem */
   14294 		break;
   14295 	}
   14296 
   14297 	return false;
   14298 }
   14299 
   14300 static void
   14301 wm_get_hw_control(struct wm_softc *sc)
   14302 {
   14303 	uint32_t reg;
   14304 
   14305 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14306 		device_xname(sc->sc_dev), __func__));
   14307 
   14308 	if (sc->sc_type == WM_T_82573) {
   14309 		reg = CSR_READ(sc, WMREG_SWSM);
   14310 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14311 	} else if (sc->sc_type >= WM_T_82571) {
   14312 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14313 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14314 	}
   14315 }
   14316 
   14317 static void
   14318 wm_release_hw_control(struct wm_softc *sc)
   14319 {
   14320 	uint32_t reg;
   14321 
   14322 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14323 		device_xname(sc->sc_dev), __func__));
   14324 
   14325 	if (sc->sc_type == WM_T_82573) {
   14326 		reg = CSR_READ(sc, WMREG_SWSM);
   14327 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14328 	} else if (sc->sc_type >= WM_T_82571) {
   14329 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14330 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14331 	}
   14332 }
   14333 
   14334 static void
   14335 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14336 {
   14337 	uint32_t reg;
   14338 
   14339 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14340 		device_xname(sc->sc_dev), __func__));
   14341 
   14342 	if (sc->sc_type < WM_T_PCH2)
   14343 		return;
   14344 
   14345 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14346 
   14347 	if (gate)
   14348 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14349 	else
   14350 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14351 
   14352 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14353 }
   14354 
   14355 static int
   14356 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14357 {
   14358 	uint32_t fwsm, reg;
   14359 	int rv = 0;
   14360 
   14361 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14362 		device_xname(sc->sc_dev), __func__));
   14363 
   14364 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14365 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14366 
   14367 	/* Disable ULP */
   14368 	wm_ulp_disable(sc);
   14369 
   14370 	/* Acquire PHY semaphore */
   14371 	rv = sc->phy.acquire(sc);
   14372 	if (rv != 0) {
   14373 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14374 		device_xname(sc->sc_dev), __func__));
   14375 		return -1;
   14376 	}
   14377 
   14378 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14379 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14380 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14381 	 */
   14382 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14383 	switch (sc->sc_type) {
   14384 	case WM_T_PCH_LPT:
   14385 	case WM_T_PCH_SPT:
   14386 	case WM_T_PCH_CNP:
   14387 		if (wm_phy_is_accessible_pchlan(sc))
   14388 			break;
   14389 
   14390 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14391 		 * forcing MAC to SMBus mode first.
   14392 		 */
   14393 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14394 		reg |= CTRL_EXT_FORCE_SMBUS;
   14395 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14396 #if 0
   14397 		/* XXX Isn't this required??? */
   14398 		CSR_WRITE_FLUSH(sc);
   14399 #endif
   14400 		/* Wait 50 milliseconds for MAC to finish any retries
   14401 		 * that it might be trying to perform from previous
   14402 		 * attempts to acknowledge any phy read requests.
   14403 		 */
   14404 		delay(50 * 1000);
   14405 		/* FALLTHROUGH */
   14406 	case WM_T_PCH2:
   14407 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14408 			break;
   14409 		/* FALLTHROUGH */
   14410 	case WM_T_PCH:
   14411 		if (sc->sc_type == WM_T_PCH)
   14412 			if ((fwsm & FWSM_FW_VALID) != 0)
   14413 				break;
   14414 
   14415 		if (wm_phy_resetisblocked(sc) == true) {
   14416 			printf("XXX reset is blocked(3)\n");
   14417 			break;
   14418 		}
   14419 
   14420 		/* Toggle LANPHYPC Value bit */
   14421 		wm_toggle_lanphypc_pch_lpt(sc);
   14422 
   14423 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14424 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14425 				break;
   14426 
   14427 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14428 			 * so ensure that the MAC is also out of SMBus mode
   14429 			 */
   14430 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14431 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14432 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14433 
   14434 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14435 				break;
   14436 			rv = -1;
   14437 		}
   14438 		break;
   14439 	default:
   14440 		break;
   14441 	}
   14442 
   14443 	/* Release semaphore */
   14444 	sc->phy.release(sc);
   14445 
   14446 	if (rv == 0) {
   14447 		/* Check to see if able to reset PHY.  Print error if not */
   14448 		if (wm_phy_resetisblocked(sc)) {
   14449 			printf("XXX reset is blocked(4)\n");
   14450 			goto out;
   14451 		}
   14452 
   14453 		/* Reset the PHY before any access to it.  Doing so, ensures
   14454 		 * that the PHY is in a known good state before we read/write
   14455 		 * PHY registers.  The generic reset is sufficient here,
   14456 		 * because we haven't determined the PHY type yet.
   14457 		 */
   14458 		if (wm_reset_phy(sc) != 0)
   14459 			goto out;
   14460 
   14461 		/* On a successful reset, possibly need to wait for the PHY
   14462 		 * to quiesce to an accessible state before returning control
    14463 		 * to the calling function.  If the PHY does not quiesce, it
    14464 		 * is still in the reset-blocked state, which we report
    14465 		 * below.
   14466 		 */
   14467 		if (wm_phy_resetisblocked(sc))
   14468 			printf("XXX reset is blocked(4)\n");
   14469 	}
   14470 
   14471 out:
   14472 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14473 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14474 		delay(10*1000);
   14475 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14476 	}
   14477 
    14478 	return rv;
   14479 }
   14480 
   14481 static void
   14482 wm_init_manageability(struct wm_softc *sc)
   14483 {
   14484 
   14485 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14486 		device_xname(sc->sc_dev), __func__));
   14487 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14488 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14489 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14490 
   14491 		/* Disable hardware interception of ARP */
   14492 		manc &= ~MANC_ARP_EN;
   14493 
   14494 		/* Enable receiving management packets to the host */
   14495 		if (sc->sc_type >= WM_T_82571) {
   14496 			manc |= MANC_EN_MNG2HOST;
   14497 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14498 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14499 		}
   14500 
   14501 		CSR_WRITE(sc, WMREG_MANC, manc);
   14502 	}
   14503 }
   14504 
   14505 static void
   14506 wm_release_manageability(struct wm_softc *sc)
   14507 {
   14508 
   14509 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14510 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14511 
   14512 		manc |= MANC_ARP_EN;
   14513 		if (sc->sc_type >= WM_T_82571)
   14514 			manc &= ~MANC_EN_MNG2HOST;
   14515 
   14516 		CSR_WRITE(sc, WMREG_MANC, manc);
   14517 	}
   14518 }
   14519 
   14520 static void
   14521 wm_get_wakeup(struct wm_softc *sc)
   14522 {
   14523 
   14524 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14525 	switch (sc->sc_type) {
   14526 	case WM_T_82573:
   14527 	case WM_T_82583:
   14528 		sc->sc_flags |= WM_F_HAS_AMT;
   14529 		/* FALLTHROUGH */
   14530 	case WM_T_80003:
   14531 	case WM_T_82575:
   14532 	case WM_T_82576:
   14533 	case WM_T_82580:
   14534 	case WM_T_I350:
   14535 	case WM_T_I354:
   14536 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14537 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14538 		/* FALLTHROUGH */
   14539 	case WM_T_82541:
   14540 	case WM_T_82541_2:
   14541 	case WM_T_82547:
   14542 	case WM_T_82547_2:
   14543 	case WM_T_82571:
   14544 	case WM_T_82572:
   14545 	case WM_T_82574:
   14546 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14547 		break;
   14548 	case WM_T_ICH8:
   14549 	case WM_T_ICH9:
   14550 	case WM_T_ICH10:
   14551 	case WM_T_PCH:
   14552 	case WM_T_PCH2:
   14553 	case WM_T_PCH_LPT:
   14554 	case WM_T_PCH_SPT:
   14555 	case WM_T_PCH_CNP:
   14556 		sc->sc_flags |= WM_F_HAS_AMT;
   14557 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14558 		break;
   14559 	default:
   14560 		break;
   14561 	}
   14562 
   14563 	/* 1: HAS_MANAGE */
   14564 	if (wm_enable_mng_pass_thru(sc) != 0)
   14565 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14566 
   14567 	/*
    14568 	 * Note that the WOL flags are set after the EEPROM stuff has
    14569 	 * been reset.
   14570 	 */
   14571 }
   14572 
   14573 /*
   14574  * Unconfigure Ultra Low Power mode.
   14575  * Only for I217 and newer (see below).
   14576  */
   14577 static int
   14578 wm_ulp_disable(struct wm_softc *sc)
   14579 {
   14580 	uint32_t reg;
   14581 	uint16_t phyreg;
   14582 	int i = 0, rv = 0;
   14583 
   14584 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14585 		device_xname(sc->sc_dev), __func__));
   14586 	/* Exclude old devices */
   14587 	if ((sc->sc_type < WM_T_PCH_LPT)
   14588 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14589 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14590 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14591 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14592 		return 0;
   14593 
   14594 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14595 		/* Request ME un-configure ULP mode in the PHY */
   14596 		reg = CSR_READ(sc, WMREG_H2ME);
   14597 		reg &= ~H2ME_ULP;
   14598 		reg |= H2ME_ENFORCE_SETTINGS;
   14599 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14600 
   14601 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14602 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14603 			if (i++ == 30) {
   14604 				printf("%s timed out\n", __func__);
   14605 				return -1;
   14606 			}
   14607 			delay(10 * 1000);
   14608 		}
   14609 		reg = CSR_READ(sc, WMREG_H2ME);
   14610 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14611 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14612 
   14613 		return 0;
   14614 	}
   14615 
   14616 	/* Acquire semaphore */
   14617 	rv = sc->phy.acquire(sc);
   14618 	if (rv != 0) {
   14619 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14620 		device_xname(sc->sc_dev), __func__));
   14621 		return -1;
   14622 	}
   14623 
   14624 	/* Toggle LANPHYPC */
   14625 	wm_toggle_lanphypc_pch_lpt(sc);
   14626 
   14627 	/* Unforce SMBus mode in PHY */
   14628 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14629 	if (rv != 0) {
   14630 		uint32_t reg2;
   14631 
   14632 		printf("%s: Force SMBus first.\n", __func__);
   14633 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14634 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14635 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14636 		delay(50 * 1000);
   14637 
   14638 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14639 		    &phyreg);
   14640 		if (rv != 0)
   14641 			goto release;
   14642 	}
   14643 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14644 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14645 
   14646 	/* Unforce SMBus mode in MAC */
   14647 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14648 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14649 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14650 
   14651 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14652 	if (rv != 0)
   14653 		goto release;
   14654 	phyreg |= HV_PM_CTRL_K1_ENA;
   14655 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14656 
   14657 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14658 		&phyreg);
   14659 	if (rv != 0)
   14660 		goto release;
   14661 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14662 	    | I218_ULP_CONFIG1_STICKY_ULP
   14663 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14664 	    | I218_ULP_CONFIG1_WOL_HOST
   14665 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14666 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14667 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14668 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14669 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14670 	phyreg |= I218_ULP_CONFIG1_START;
   14671 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14672 
   14673 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14674 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14675 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14676 
   14677 release:
   14678 	/* Release semaphore */
   14679 	sc->phy.release(sc);
   14680 	wm_gmii_reset(sc);
   14681 	delay(50 * 1000);
   14682 
   14683 	return rv;
   14684 }
   14685 
   14686 /* WOL in the newer chipset interfaces (pchlan) */
   14687 static int
   14688 wm_enable_phy_wakeup(struct wm_softc *sc)
   14689 {
   14690 	device_t dev = sc->sc_dev;
   14691 	uint32_t mreg, moff;
   14692 	uint16_t wuce, wuc, wufc, preg;
   14693 	int i, rv;
   14694 
   14695 	KASSERT(sc->sc_type >= WM_T_PCH);
   14696 
   14697 	/* Copy MAC RARs to PHY RARs */
   14698 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14699 
   14700 	/* Activate PHY wakeup */
   14701 	rv = sc->phy.acquire(sc);
   14702 	if (rv != 0) {
   14703 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14704 		    __func__);
   14705 		return rv;
   14706 	}
   14707 
   14708 	/*
   14709 	 * Enable access to PHY wakeup registers.
   14710 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14711 	 */
   14712 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14713 	if (rv != 0) {
   14714 		device_printf(dev,
   14715 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14716 		goto release;
   14717 	}
   14718 
   14719 	/* Copy MAC MTA to PHY MTA */
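          	/*
          	 * Each 32-bit MTA entry is written to the PHY as two 16-bit
          	 * halves: the low word at BM_MTA(i) and the high word at
          	 * BM_MTA(i) + 1.
          	 */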
   14720 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14721 		uint16_t lo, hi;
   14722 
   14723 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14724 		lo = (uint16_t)(mreg & 0xffff);
   14725 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14726 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14727 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14728 	}
   14729 
   14730 	/* Configure PHY Rx Control register */
   14731 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14732 	mreg = CSR_READ(sc, WMREG_RCTL);
   14733 	if (mreg & RCTL_UPE)
   14734 		preg |= BM_RCTL_UPE;
   14735 	if (mreg & RCTL_MPE)
   14736 		preg |= BM_RCTL_MPE;
   14737 	preg &= ~(BM_RCTL_MO_MASK);
   14738 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14739 	if (moff != 0)
   14740 		preg |= moff << BM_RCTL_MO_SHIFT;
   14741 	if (mreg & RCTL_BAM)
   14742 		preg |= BM_RCTL_BAM;
   14743 	if (mreg & RCTL_PMCF)
   14744 		preg |= BM_RCTL_PMCF;
   14745 	mreg = CSR_READ(sc, WMREG_CTRL);
   14746 	if (mreg & CTRL_RFCE)
   14747 		preg |= BM_RCTL_RFCE;
   14748 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14749 
   14750 	wuc = WUC_APME | WUC_PME_EN;
   14751 	wufc = WUFC_MAG;
   14752 	/* Enable PHY wakeup in MAC register */
   14753 	CSR_WRITE(sc, WMREG_WUC,
   14754 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14755 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14756 
   14757 	/* Configure and enable PHY wakeup in PHY registers */
   14758 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14759 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14760 
   14761 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14762 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14763 
   14764 release:
   14765 	sc->phy.release(sc);
   14766 
    14767 	return rv;
   14768 }
   14769 
   14770 /* Power down workaround on D3 */
   14771 static void
   14772 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14773 {
   14774 	uint32_t reg;
   14775 	uint16_t phyreg;
   14776 	int i;
   14777 
   14778 	for (i = 0; i < 2; i++) {
   14779 		/* Disable link */
   14780 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14781 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14782 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14783 
   14784 		/*
   14785 		 * Call gig speed drop workaround on Gig disable before
   14786 		 * accessing any PHY registers
   14787 		 */
   14788 		if (sc->sc_type == WM_T_ICH8)
   14789 			wm_gig_downshift_workaround_ich8lan(sc);
   14790 
   14791 		/* Write VR power-down enable */
   14792 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14793 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14794 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14795 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14796 
   14797 		/* Read it back and test */
   14798 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14799 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14800 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14801 			break;
   14802 
   14803 		/* Issue PHY reset and repeat at most one more time */
   14804 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14805 	}
   14806 }
   14807 
   14808 /*
   14809  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14810  *  @sc: pointer to the HW structure
   14811  *
   14812  *  During S0 to Sx transition, it is possible the link remains at gig
   14813  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14814  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14815  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14816  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14817  *  needs to be written.
    14818  *  Parts that support (and are linked to a partner which supports) EEE in
   14819  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14820  *  than 10Mbps w/o EEE.
   14821  */
   14822 static void
   14823 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14824 {
   14825 	device_t dev = sc->sc_dev;
   14826 	struct ethercom *ec = &sc->sc_ethercom;
   14827 	uint32_t phy_ctrl;
   14828 	int rv;
   14829 
   14830 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14831 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14832 
   14833 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14834 
   14835 	if (sc->sc_phytype == WMPHY_I217) {
   14836 		uint16_t devid = sc->sc_pcidevid;
   14837 
   14838 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14839 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14840 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14841 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14842 		    (sc->sc_type >= WM_T_PCH_SPT))
   14843 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14844 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14845 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14846 
   14847 		if (sc->phy.acquire(sc) != 0)
   14848 			goto out;
   14849 
   14850 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14851 			uint16_t eee_advert;
   14852 
   14853 			rv = wm_read_emi_reg_locked(dev,
   14854 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14855 			if (rv)
   14856 				goto release;
   14857 
   14858 			/*
   14859 			 * Disable LPLU if both link partners support 100BaseT
   14860 			 * EEE and 100Full is advertised on both ends of the
   14861 			 * link, and enable Auto Enable LPI since there will
   14862 			 * be no driver to enable LPI while in Sx.
   14863 			 */
   14864 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14865 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14866 				uint16_t anar, phy_reg;
   14867 
   14868 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14869 				    &anar);
   14870 				if (anar & ANAR_TX_FD) {
   14871 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14872 					    PHY_CTRL_NOND0A_LPLU);
   14873 
   14874 					/* Set Auto Enable LPI after link up */
   14875 					sc->phy.readreg_locked(dev, 2,
   14876 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14877 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14878 					sc->phy.writereg_locked(dev, 2,
   14879 					    I217_LPI_GPIO_CTRL, phy_reg);
   14880 				}
   14881 			}
   14882 		}
   14883 
   14884 		/*
   14885 		 * For i217 Intel Rapid Start Technology support,
   14886 		 * when the system is going into Sx and no manageability engine
   14887 		 * is present, the driver must configure proxy to reset only on
   14888 		 * power good.	LPI (Low Power Idle) state must also reset only
   14889 		 * on power good, as well as the MTA (Multicast table array).
   14890 		 * The SMBus release must also be disabled on LCD reset.
   14891 		 */
   14892 
   14893 		/*
   14894 		 * Enable MTA to reset for Intel Rapid Start Technology
   14895 		 * Support
   14896 		 */
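          		/*
          		 * XXX The proxy/LPI/SMBus configuration described above
          		 * is not implemented here yet.
          		 */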
   14897 
   14898 release:
   14899 		sc->phy.release(sc);
   14900 	}
   14901 out:
   14902 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14903 
   14904 	if (sc->sc_type == WM_T_ICH8)
   14905 		wm_gig_downshift_workaround_ich8lan(sc);
   14906 
   14907 	if (sc->sc_type >= WM_T_PCH) {
   14908 		wm_oem_bits_config_ich8lan(sc, false);
   14909 
   14910 		/* Reset PHY to activate OEM bits on 82577/8 */
   14911 		if (sc->sc_type == WM_T_PCH)
   14912 			wm_reset_phy(sc);
   14913 
   14914 		if (sc->phy.acquire(sc) != 0)
   14915 			return;
   14916 		wm_write_smbus_addr(sc);
   14917 		sc->phy.release(sc);
   14918 	}
   14919 }
   14920 
   14921 /*
   14922  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14923  *  @sc: pointer to the HW structure
   14924  *
   14925  *  During Sx to S0 transitions on non-managed devices or managed devices
   14926  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14927  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14928  *  the PHY.
   14929  *  On i217, setup Intel Rapid Start Technology.
   14930  */
   14931 static int
   14932 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14933 {
   14934 	device_t dev = sc->sc_dev;
   14935 	int rv;
   14936 
   14937 	if (sc->sc_type < WM_T_PCH2)
   14938 		return 0;
   14939 
   14940 	rv = wm_init_phy_workarounds_pchlan(sc);
   14941 	if (rv != 0)
   14942 		return -1;
   14943 
   14944 	/* For i217 Intel Rapid Start Technology support when the system
   14945 	 * is transitioning from Sx and no manageability engine is present
   14946 	 * configure SMBus to restore on reset, disable proxy, and enable
   14947 	 * the reset on MTA (Multicast table array).
   14948 	 */
   14949 	if (sc->sc_phytype == WMPHY_I217) {
   14950 		uint16_t phy_reg;
   14951 
   14952 		if (sc->phy.acquire(sc) != 0)
   14953 			return -1;
   14954 
   14955 		/* Clear Auto Enable LPI after link up */
   14956 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14957 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14958 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14959 
   14960 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14961 			/* Restore clear on SMB if no manageability engine
   14962 			 * is present
   14963 			 */
   14964 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14965 			    &phy_reg);
   14966 			if (rv != 0)
   14967 				goto release;
   14968 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14969 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14970 
   14971 			/* Disable Proxy */
   14972 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14973 		}
   14974 		/* Enable reset on MTA */
    14975 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14976 		if (rv != 0)
   14977 			goto release;
   14978 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14979 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14980 
   14981 release:
   14982 		sc->phy.release(sc);
   14983 		return rv;
   14984 	}
   14985 
   14986 	return 0;
   14987 }
   14988 
   14989 static void
   14990 wm_enable_wakeup(struct wm_softc *sc)
   14991 {
   14992 	uint32_t reg, pmreg;
   14993 	pcireg_t pmode;
   14994 	int rv = 0;
   14995 
   14996 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14997 		device_xname(sc->sc_dev), __func__));
   14998 
   14999 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15000 	    &pmreg, NULL) == 0)
   15001 		return;
   15002 
   15003 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15004 		goto pme;
   15005 
   15006 	/* Advertise the wakeup capability */
   15007 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15008 	    | CTRL_SWDPIN(3));
   15009 
   15010 	/* Keep the laser running on fiber adapters */
   15011 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15012 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15013 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15014 		reg |= CTRL_EXT_SWDPIN(3);
   15015 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15016 	}
   15017 
   15018 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15019 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15020 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15021 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15022 		wm_suspend_workarounds_ich8lan(sc);
   15023 
   15024 #if 0	/* for the multicast packet */
   15025 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15026 	reg |= WUFC_MC;
   15027 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15028 #endif
   15029 
   15030 	if (sc->sc_type >= WM_T_PCH) {
   15031 		rv = wm_enable_phy_wakeup(sc);
   15032 		if (rv != 0)
   15033 			goto pme;
   15034 	} else {
   15035 		/* Enable wakeup by the MAC */
   15036 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15037 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15038 	}
   15039 
   15040 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15041 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15042 		|| (sc->sc_type == WM_T_PCH2))
   15043 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15044 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15045 
   15046 pme:
   15047 	/* Request PME */
   15048 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15049 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15050 		/* For WOL */
   15051 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15052 	} else {
   15053 		/* Disable WOL */
   15054 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15055 	}
   15056 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15057 }
   15058 
   15059 /* Disable ASPM L0s and/or L1 for workaround */
   15060 static void
   15061 wm_disable_aspm(struct wm_softc *sc)
   15062 {
   15063 	pcireg_t reg, mask = 0;
    15064 	const char *str = "";
   15065 
    15066 	/*
    15067 	 * Only for PCIe devices that have the PCIe capability in the
    15068 	 * PCI config space.
    15069 	 */
   15070 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15071 		return;
   15072 
   15073 	switch (sc->sc_type) {
   15074 	case WM_T_82571:
   15075 	case WM_T_82572:
   15076 		/*
   15077 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15078 		 * State Power management L1 State (ASPM L1).
   15079 		 */
   15080 		mask = PCIE_LCSR_ASPM_L1;
   15081 		str = "L1 is";
   15082 		break;
   15083 	case WM_T_82573:
   15084 	case WM_T_82574:
   15085 	case WM_T_82583:
   15086 		/*
    15087 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
    15088 		 *
    15089 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15090 		 * some chipsets.  The documents for the 82574 and 82583 say
    15091 		 * that disabling L0s on those specific chipsets is
    15092 		 * sufficient, but we follow what the Intel em driver does.
   15093 		 *
   15094 		 * References:
   15095 		 * Errata 8 of the Specification Update of i82573.
   15096 		 * Errata 20 of the Specification Update of i82574.
   15097 		 * Errata 9 of the Specification Update of i82583.
   15098 		 */
   15099 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15100 		str = "L0s and L1 are";
   15101 		break;
   15102 	default:
   15103 		return;
   15104 	}
   15105 
   15106 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15107 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15108 	reg &= ~mask;
   15109 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15110 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15111 
   15112 	/* Print only in wm_attach() */
   15113 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15114 		aprint_verbose_dev(sc->sc_dev,
    15115 		    "ASPM %s disabled to work around the errata.\n", str);
   15116 }
   15117 
   15118 /* LPLU */
   15119 
   15120 static void
   15121 wm_lplu_d0_disable(struct wm_softc *sc)
   15122 {
   15123 	struct mii_data *mii = &sc->sc_mii;
   15124 	uint32_t reg;
   15125 	uint16_t phyval;
   15126 
   15127 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15128 		device_xname(sc->sc_dev), __func__));
   15129 
   15130 	if (sc->sc_phytype == WMPHY_IFE)
   15131 		return;
   15132 
   15133 	switch (sc->sc_type) {
   15134 	case WM_T_82571:
   15135 	case WM_T_82572:
   15136 	case WM_T_82573:
   15137 	case WM_T_82575:
   15138 	case WM_T_82576:
   15139 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15140 		phyval &= ~PMR_D0_LPLU;
   15141 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15142 		break;
   15143 	case WM_T_82580:
   15144 	case WM_T_I350:
   15145 	case WM_T_I210:
   15146 	case WM_T_I211:
   15147 		reg = CSR_READ(sc, WMREG_PHPM);
   15148 		reg &= ~PHPM_D0A_LPLU;
   15149 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15150 		break;
   15151 	case WM_T_82574:
   15152 	case WM_T_82583:
   15153 	case WM_T_ICH8:
   15154 	case WM_T_ICH9:
   15155 	case WM_T_ICH10:
   15156 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15157 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15158 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15159 		CSR_WRITE_FLUSH(sc);
   15160 		break;
   15161 	case WM_T_PCH:
   15162 	case WM_T_PCH2:
   15163 	case WM_T_PCH_LPT:
   15164 	case WM_T_PCH_SPT:
   15165 	case WM_T_PCH_CNP:
   15166 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15167 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15168 		if (wm_phy_resetisblocked(sc) == false)
   15169 			phyval |= HV_OEM_BITS_ANEGNOW;
   15170 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15171 		break;
   15172 	default:
   15173 		break;
   15174 	}
   15175 }
   15176 
   15177 /* EEE */
   15178 
   15179 static int
   15180 wm_set_eee_i350(struct wm_softc *sc)
   15181 {
   15182 	struct ethercom *ec = &sc->sc_ethercom;
   15183 	uint32_t ipcnfg, eeer;
   15184 	uint32_t ipcnfg_mask
   15185 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15186 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15187 
   15188 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15189 
   15190 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15191 	eeer = CSR_READ(sc, WMREG_EEER);
   15192 
   15193 	/* enable or disable per user setting */
   15194 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15195 		ipcnfg |= ipcnfg_mask;
   15196 		eeer |= eeer_mask;
   15197 	} else {
   15198 		ipcnfg &= ~ipcnfg_mask;
   15199 		eeer &= ~eeer_mask;
   15200 	}
   15201 
   15202 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15203 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15204 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15205 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15206 
   15207 	return 0;
   15208 }
   15209 
   15210 static int
   15211 wm_set_eee_pchlan(struct wm_softc *sc)
   15212 {
   15213 	device_t dev = sc->sc_dev;
   15214 	struct ethercom *ec = &sc->sc_ethercom;
   15215 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15216 	int rv = 0;
   15217 
   15218 	switch (sc->sc_phytype) {
   15219 	case WMPHY_82579:
   15220 		lpa = I82579_EEE_LP_ABILITY;
   15221 		pcs_status = I82579_EEE_PCS_STATUS;
   15222 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15223 		break;
   15224 	case WMPHY_I217:
   15225 		lpa = I217_EEE_LP_ABILITY;
   15226 		pcs_status = I217_EEE_PCS_STATUS;
   15227 		adv_addr = I217_EEE_ADVERTISEMENT;
   15228 		break;
   15229 	default:
   15230 		return 0;
   15231 	}
   15232 
   15233 	if (sc->phy.acquire(sc)) {
   15234 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15235 		return 0;
   15236 	}
   15237 
   15238 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15239 	if (rv != 0)
   15240 		goto release;
   15241 
   15242 	/* Clear bits that enable EEE in various speeds */
   15243 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15244 
   15245 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15246 		/* Save off link partner's EEE ability */
   15247 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15248 		if (rv != 0)
   15249 			goto release;
   15250 
   15251 		/* Read EEE advertisement */
   15252 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15253 			goto release;
   15254 
   15255 		/*
   15256 		 * Enable EEE only for speeds in which the link partner is
   15257 		 * EEE capable and for which we advertise EEE.
   15258 		 */
   15259 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15260 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15261 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15262 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15263 			if ((data & ANLPAR_TX_FD) != 0)
   15264 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15265 			else {
   15266 				/*
   15267 				 * EEE is not supported in 100Half, so ignore
   15268 				 * partner's EEE in 100 ability if full-duplex
   15269 				 * is not advertised.
   15270 				 */
   15271 				sc->eee_lp_ability
   15272 				    &= ~AN_EEEADVERT_100_TX;
   15273 			}
   15274 		}
   15275 	}
   15276 
   15277 	if (sc->sc_phytype == WMPHY_82579) {
   15278 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15279 		if (rv != 0)
   15280 			goto release;
   15281 
   15282 		data &= ~I82579_LPI_PLL_SHUT_100;
   15283 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15284 	}
   15285 
   15286 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15287 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15288 		goto release;
   15289 
   15290 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15291 release:
   15292 	sc->phy.release(sc);
   15293 
   15294 	return rv;
   15295 }
   15296 
   15297 static int
   15298 wm_set_eee(struct wm_softc *sc)
   15299 {
   15300 	struct ethercom *ec = &sc->sc_ethercom;
   15301 
   15302 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15303 		return 0;
   15304 
   15305 	if (sc->sc_type == WM_T_I354) {
   15306 		/* I354 uses an external PHY */
   15307 		return 0; /* not yet */
   15308 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15309 		return wm_set_eee_i350(sc);
   15310 	else if (sc->sc_type >= WM_T_PCH2)
   15311 		return wm_set_eee_pchlan(sc);
   15312 
   15313 	return 0;
   15314 }
   15315 
   15316 /*
   15317  * Workarounds (mainly PHY related).
    15318  * Basically, PHY-specific workarounds live in the PHY drivers.
   15319  */
   15320 
   15321 /* Work-around for 82566 Kumeran PCS lock loss */
   15322 static int
   15323 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15324 {
   15325 	struct mii_data *mii = &sc->sc_mii;
   15326 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15327 	int i, reg, rv;
   15328 	uint16_t phyreg;
   15329 
   15330 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15331 		device_xname(sc->sc_dev), __func__));
   15332 
   15333 	/* If the link is not up, do nothing */
   15334 	if ((status & STATUS_LU) == 0)
   15335 		return 0;
   15336 
    15337 	/* Nothing to do if the link speed is not 1Gbps */
   15338 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15339 		return 0;
   15340 
   15341 	for (i = 0; i < 10; i++) {
   15342 		/* read twice */
   15343 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15344 		if (rv != 0)
   15345 			return rv;
   15346 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15347 		if (rv != 0)
   15348 			return rv;
   15349 
   15350 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15351 			goto out;	/* GOOD! */
   15352 
   15353 		/* Reset the PHY */
   15354 		wm_reset_phy(sc);
   15355 		delay(5*1000);
   15356 	}
   15357 
   15358 	/* Disable GigE link negotiation */
   15359 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15360 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15361 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15362 
   15363 	/*
   15364 	 * Call gig speed drop workaround on Gig disable before accessing
   15365 	 * any PHY registers.
   15366 	 */
   15367 	wm_gig_downshift_workaround_ich8lan(sc);
   15368 
   15369 out:
   15370 	return 0;
   15371 }
   15372 
   15373 /*
   15374  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15375  *  @sc: pointer to the HW structure
   15376  *
    15377  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15378  *  LPLU, Gig disable, MDIC PHY reset):
   15379  *    1) Set Kumeran Near-end loopback
   15380  *    2) Clear Kumeran Near-end loopback
   15381  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15382  */
   15383 static void
   15384 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15385 {
   15386 	uint16_t kmreg;
   15387 
   15388 	/* Only for igp3 */
   15389 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15390 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15391 			return;
   15392 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15393 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15394 			return;
   15395 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15396 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15397 	}
   15398 }
   15399 
   15400 /*
   15401  * Workaround for pch's PHYs
   15402  * XXX should be moved to new PHY driver?
   15403  */
   15404 static int
   15405 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15406 {
   15407 	device_t dev = sc->sc_dev;
   15408 	struct mii_data *mii = &sc->sc_mii;
   15409 	struct mii_softc *child;
   15410 	uint16_t phy_data, phyrev = 0;
   15411 	int phytype = sc->sc_phytype;
   15412 	int rv;
   15413 
   15414 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15415 		device_xname(dev), __func__));
   15416 	KASSERT(sc->sc_type == WM_T_PCH);
   15417 
   15418 	/* Set MDIO slow mode before any other MDIO access */
   15419 	if (phytype == WMPHY_82577)
   15420 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15421 			return rv;
   15422 
   15423 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15424 	if (child != NULL)
   15425 		phyrev = child->mii_mpd_rev;
   15426 
    15427 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15428 	if ((child != NULL) &&
   15429 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15430 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15431 		/* Disable generation of early preamble (0x4431) */
   15432 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15433 		    &phy_data);
   15434 		if (rv != 0)
   15435 			return rv;
   15436 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15437 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15438 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15439 		    phy_data);
   15440 		if (rv != 0)
   15441 			return rv;
   15442 
   15443 		/* Preamble tuning for SSC */
   15444 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15445 		if (rv != 0)
   15446 			return rv;
   15447 	}
   15448 
   15449 	/* 82578 */
   15450 	if (phytype == WMPHY_82578) {
   15451 		/*
   15452 		 * Return registers to default by doing a soft reset then
   15453 		 * writing 0x3140 to the control register
   15454 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15455 		 */
   15456 		if ((child != NULL) && (phyrev < 2)) {
   15457 			PHY_RESET(child);
   15458 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15459 			    0x3140);
   15460 			if (rv != 0)
   15461 				return rv;
   15462 		}
   15463 	}
   15464 
   15465 	/* Select page 0 */
   15466 	if ((rv = sc->phy.acquire(sc)) != 0)
   15467 		return rv;
   15468 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15469 	sc->phy.release(sc);
   15470 	if (rv != 0)
   15471 		return rv;
   15472 
   15473 	/*
    15474 	 * Configure the K1 Si workaround during PHY reset, assuming there
    15475 	 * is link, so that it disables K1 when the link is at 1Gbps.
   15476 	 */
   15477 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15478 		return rv;
   15479 
   15480 	/* Workaround for link disconnects on a busy hub in half duplex */
   15481 	rv = sc->phy.acquire(sc);
   15482 	if (rv)
   15483 		return rv;
   15484 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15485 	if (rv)
   15486 		goto release;
   15487 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15488 	    phy_data & 0x00ff);
   15489 	if (rv)
   15490 		goto release;
   15491 
    15492 	/* Set MSE higher so the link stays up when noise is high */
   15493 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15494 release:
   15495 	sc->phy.release(sc);
   15496 
    15497 	return rv;
    15498 }
   15501 
   15502 /*
   15503  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15504  *  @sc:   pointer to the HW structure
   15505  */
   15506 static void
   15507 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15508 {
   15509 	device_t dev = sc->sc_dev;
   15510 	uint32_t mac_reg;
   15511 	uint16_t i, wuce;
   15512 	int count;
   15513 
   15514 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15515 		device_xname(sc->sc_dev), __func__));
   15516 
   15517 	if (sc->phy.acquire(sc) != 0)
   15518 		return;
   15519 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15520 		goto release;
   15521 
   15522 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
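          	/*
          	 * Each 32-bit RAL/RAH value is written to the PHY as two
          	 * 16-bit halves; only the RAL_AV bit of RAH is carried into
          	 * the BM_RAR_CTRL half.
          	 */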
   15523 	count = wm_rar_count(sc);
   15524 	for (i = 0; i < count; i++) {
   15525 		uint16_t lo, hi;
   15526 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15527 		lo = (uint16_t)(mac_reg & 0xffff);
   15528 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15529 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15530 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15531 
   15532 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15533 		lo = (uint16_t)(mac_reg & 0xffff);
   15534 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15535 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15536 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15537 	}
   15538 
   15539 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15540 
   15541 release:
   15542 	sc->phy.release(sc);
   15543 }
   15544 
   15545 /*
   15546  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15547  *  done after every PHY reset.
   15548  */
   15549 static int
   15550 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15551 {
   15552 	device_t dev = sc->sc_dev;
   15553 	int rv;
   15554 
   15555 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15556 		device_xname(dev), __func__));
   15557 	KASSERT(sc->sc_type == WM_T_PCH2);
   15558 
   15559 	/* Set MDIO slow mode before any other MDIO access */
   15560 	rv = wm_set_mdio_slow_mode_hv(sc);
   15561 	if (rv != 0)
   15562 		return rv;
   15563 
   15564 	rv = sc->phy.acquire(sc);
   15565 	if (rv != 0)
   15566 		return rv;
    15567 	/* Set MSE higher so the link stays up when noise is high */
   15568 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15569 	if (rv != 0)
   15570 		goto release;
    15571 	/* Drop the link after the MSE threshold has been reached 5 times */
   15572 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15573 release:
   15574 	sc->phy.release(sc);
   15575 
   15576 	return rv;
   15577 }
   15578 
   15579 /**
   15580  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15581  *  @link: link up bool flag
   15582  *
   15583  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15584  *  preventing further DMA write requests.  Workaround the issue by disabling
    15585  *  the de-assertion of the clock request when in 1Gbps mode.
   15586  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15587  *  speeds in order to avoid Tx hangs.
   15588  **/
   15589 static int
   15590 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15591 {
   15592 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15593 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15594 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15595 	uint16_t phyreg;
   15596 
   15597 	if (link && (speed == STATUS_SPEED_1000)) {
    15598 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15599 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15600 		    &phyreg);
   15601 		if (rv != 0)
   15602 			goto release;
   15603 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15604 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15605 		if (rv != 0)
   15606 			goto release;
   15607 		delay(20);
   15608 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15609 
   15610 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15611 		    &phyreg);
   15612 release:
   15613 		sc->phy.release(sc);
   15614 		return rv;
   15615 	}
   15616 
   15617 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15618 
   15619 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15620 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15621 	    || !link
   15622 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15623 		goto update_fextnvm6;
   15624 
   15625 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15626 
   15627 	/* Clear link status transmit timeout */
   15628 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15629 	if (speed == STATUS_SPEED_100) {
   15630 		/* Set inband Tx timeout to 5x10us for 100Half */
   15631 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15632 
   15633 		/* Do not extend the K1 entry latency for 100Half */
   15634 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15635 	} else {
   15636 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15637 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15638 
   15639 		/* Extend the K1 entry latency for 10 Mbps */
   15640 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15641 	}
   15642 
   15643 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15644 
   15645 update_fextnvm6:
   15646 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15647 	return 0;
   15648 }
   15649 
   15650 /*
   15651  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15652  *  @sc:   pointer to the HW structure
   15653  *  @link: link up bool flag
   15654  *
   15655  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15656  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15657  *  If link is down, the function will restore the default K1 setting located
   15658  *  in the NVM.
   15659  */
   15660 static int
   15661 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15662 {
   15663 	int k1_enable = sc->sc_nvm_k1_enabled;
   15664 
   15665 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15666 		device_xname(sc->sc_dev), __func__));
   15667 
   15668 	if (sc->phy.acquire(sc) != 0)
   15669 		return -1;
   15670 
   15671 	if (link) {
   15672 		k1_enable = 0;
   15673 
   15674 		/* Link stall fix for link up */
   15675 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15676 		    0x0100);
   15677 	} else {
   15678 		/* Link stall fix for link down */
   15679 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15680 		    0x4100);
   15681 	}
   15682 
   15683 	wm_configure_k1_ich8lan(sc, k1_enable);
   15684 	sc->phy.release(sc);
   15685 
   15686 	return 0;
   15687 }
   15688 
   15689 /*
   15690  *  wm_k1_workaround_lv - K1 Si workaround
   15691  *  @sc:   pointer to the HW structure
   15692  *
    15693  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    15694  *  Disable K1 for 1000 and 100 speeds.
   15695  */
   15696 static int
   15697 wm_k1_workaround_lv(struct wm_softc *sc)
   15698 {
   15699 	uint32_t reg;
   15700 	uint16_t phyreg;
   15701 	int rv;
   15702 
   15703 	if (sc->sc_type != WM_T_PCH2)
   15704 		return 0;
   15705 
   15706 	/* Set K1 beacon duration based on 10Mbps speed */
   15707 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15708 	if (rv != 0)
   15709 		return rv;
   15710 
   15711 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15712 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15713 		if (phyreg &
   15714 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15715 			/* LV 1G/100 packet drop issue workaround */
   15716 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15717 			    &phyreg);
   15718 			if (rv != 0)
   15719 				return rv;
   15720 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15721 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15722 			    phyreg);
   15723 			if (rv != 0)
   15724 				return rv;
   15725 		} else {
   15726 			/* For 10Mbps */
   15727 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15728 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15729 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15730 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15731 		}
   15732 	}
   15733 
   15734 	return 0;
   15735 }
   15736 
   15737 /*
   15738  *  wm_link_stall_workaround_hv - Si workaround
   15739  *  @sc: pointer to the HW structure
   15740  *
   15741  *  This function works around a Si bug where the link partner can get
   15742  *  a link up indication before the PHY does. If small packets are sent
   15743  *  by the link partner they can be placed in the packet buffer without
   15744  *  being properly accounted for by the PHY and will stall preventing
   15745  *  further packets from being received.  The workaround is to clear the
   15746  *  packet buffer after the PHY detects link up.
   15747  */
   15748 static int
   15749 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15750 {
   15751 	uint16_t phyreg;
   15752 
   15753 	if (sc->sc_phytype != WMPHY_82578)
   15754 		return 0;
   15755 
    15756 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15757 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15758 	if ((phyreg & BMCR_LOOP) != 0)
   15759 		return 0;
   15760 
   15761 	/* check if link is up and at 1Gbps */
   15762 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15763 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15764 	    | BM_CS_STATUS_SPEED_MASK;
   15765 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15766 		| BM_CS_STATUS_SPEED_1000))
   15767 		return 0;
   15768 
   15769 	delay(200 * 1000);	/* XXX too big */
   15770 
   15771 	/* flush the packets in the fifo buffer */
   15772 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15773 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15774 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15775 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15776 
   15777 	return 0;
   15778 }
   15779 
   15780 static int
   15781 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15782 {
   15783 	int rv;
   15784 	uint16_t reg;
   15785 
   15786 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15787 	if (rv != 0)
   15788 		return rv;
   15789 
    15790 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15791 	    reg | HV_KMRN_MDIO_SLOW);
   15792 }
   15793 
   15794 /*
   15795  *  wm_configure_k1_ich8lan - Configure K1 power state
   15796  *  @sc: pointer to the HW structure
   15797  *  @enable: K1 state to configure
   15798  *
   15799  *  Configure the K1 power state based on the provided parameter.
   15800  *  Assumes semaphore already acquired.
   15801  */
   15802 static void
   15803 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15804 {
   15805 	uint32_t ctrl, ctrl_ext, tmp;
   15806 	uint16_t kmreg;
   15807 	int rv;
   15808 
   15809 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15810 
   15811 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15812 	if (rv != 0)
   15813 		return;
   15814 
   15815 	if (k1_enable)
   15816 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15817 	else
   15818 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15819 
   15820 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15821 	if (rv != 0)
   15822 		return;
   15823 
   15824 	delay(20);
   15825 
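          	/*
          	 * Briefly force the MAC speed settings with CTRL_FRCSPD and
          	 * CTRL_EXT_SPD_BYPS (presumably so that the new K1 setting
          	 * takes effect), then restore the original values.
          	 */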
   15826 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15827 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15828 
   15829 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15830 	tmp |= CTRL_FRCSPD;
   15831 
   15832 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15833 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15834 	CSR_WRITE_FLUSH(sc);
   15835 	delay(20);
   15836 
   15837 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15838 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15839 	CSR_WRITE_FLUSH(sc);
   15840 	delay(20);
   15843 }
   15844 
   15845 /* special case - for 82575 - need to do manual init ... */
   15846 static void
   15847 wm_reset_init_script_82575(struct wm_softc *sc)
   15848 {
   15849 	/*
    15850 	 * Remark: this is untested code - we have no board without EEPROM.
    15851 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15852 	 */
   15853 
   15854 	/* SerDes configuration via SERDESCTRL */
   15855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15858 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15859 
   15860 	/* CCM configuration via CCMCTL register */
   15861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15862 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15863 
   15864 	/* PCIe lanes configuration */
   15865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15867 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15868 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15869 
   15870 	/* PCIe PLL Configuration */
   15871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15872 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15873 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15874 }
   15875 
   15876 static void
   15877 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15878 {
   15879 	uint32_t reg;
   15880 	uint16_t nvmword;
   15881 	int rv;
   15882 
   15883 	if (sc->sc_type != WM_T_82580)
   15884 		return;
   15885 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15886 		return;
   15887 
   15888 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15889 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15890 	if (rv != 0) {
   15891 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15892 		    __func__);
   15893 		return;
   15894 	}
   15895 
   15896 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15897 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15898 		reg |= MDICNFG_DEST;
   15899 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15900 		reg |= MDICNFG_COM_MDIO;
   15901 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15902 }
   15903 
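          /*
           * PHY ID registers read as all zeros or all ones when no PHY
           * responds, so treat both values as invalid.
           */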
   15904 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15905 
   15906 static bool
   15907 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15908 {
   15909 	uint32_t reg;
   15910 	uint16_t id1, id2;
   15911 	int i, rv;
   15912 
   15913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15914 		device_xname(sc->sc_dev), __func__));
   15915 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15916 
   15917 	id1 = id2 = 0xffff;
   15918 	for (i = 0; i < 2; i++) {
   15919 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15920 		    &id1);
   15921 		if ((rv != 0) || MII_INVALIDID(id1))
   15922 			continue;
   15923 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15924 		    &id2);
   15925 		if ((rv != 0) || MII_INVALIDID(id2))
   15926 			continue;
   15927 		break;
   15928 	}
   15929 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15930 		goto out;
   15931 
   15932 	/*
   15933 	 * In case the PHY needs to be in mdio slow mode,
   15934 	 * set slow mode and try to get the PHY id again.
   15935 	 */
   15936 	rv = 0;
   15937 	if (sc->sc_type < WM_T_PCH_LPT) {
   15938 		sc->phy.release(sc);
   15939 		wm_set_mdio_slow_mode_hv(sc);
   15940 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15941 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15942 		sc->phy.acquire(sc);
   15943 	}
   15944 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15945 		printf("XXX return with false\n");
   15946 		return false;
   15947 	}
   15948 out:
   15949 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15950 		/* Only unforce SMBus if ME is not active */
   15951 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15952 			uint16_t phyreg;
   15953 
   15954 			/* Unforce SMBus mode in PHY */
   15955 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15956 			    CV_SMB_CTRL, &phyreg);
   15957 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15958 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15959 			    CV_SMB_CTRL, phyreg);
   15960 
   15961 			/* Unforce SMBus mode in MAC */
   15962 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15963 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15964 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15965 		}
   15966 	}
   15967 	return true;
   15968 }
   15969 
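          /*
           * Toggle the LANPHYPC pin value to power-cycle the PHY.  On
           * PCH_LPT and newer, completion is detected by polling
           * CTRL_EXT_LPCD instead of using only a fixed delay.
           */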
   15970 static void
   15971 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15972 {
   15973 	uint32_t reg;
   15974 	int i;
   15975 
   15976 	/* Set PHY Config Counter to 50msec */
   15977 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15978 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15979 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15980 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15981 
   15982 	/* Toggle LANPHYPC */
   15983 	reg = CSR_READ(sc, WMREG_CTRL);
   15984 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15985 	reg &= ~CTRL_LANPHYPC_VALUE;
   15986 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15987 	CSR_WRITE_FLUSH(sc);
   15988 	delay(1000);
   15989 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15990 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15991 	CSR_WRITE_FLUSH(sc);
   15992 
   15993 	if (sc->sc_type < WM_T_PCH_LPT)
   15994 		delay(50 * 1000);
   15995 	else {
   15996 		i = 20;
   15997 
   15998 		do {
   15999 			delay(5 * 1000);
   16000 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16001 		    && i--);
   16002 
   16003 		delay(30 * 1000);
   16004 	}
   16005 }
   16006 
   16007 static int
   16008 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16009 {
   16010 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16011 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16012 	uint32_t rxa;
   16013 	uint16_t scale = 0, lat_enc = 0;
   16014 	int32_t obff_hwm = 0;
   16015 	int64_t lat_ns, value;
   16016 
   16017 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16018 		device_xname(sc->sc_dev), __func__));
   16019 
   16020 	if (link) {
   16021 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16022 		uint32_t status;
   16023 		uint16_t speed;
   16024 		pcireg_t preg;
   16025 
   16026 		status = CSR_READ(sc, WMREG_STATUS);
   16027 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16028 		case STATUS_SPEED_10:
   16029 			speed = 10;
   16030 			break;
   16031 		case STATUS_SPEED_100:
   16032 			speed = 100;
   16033 			break;
   16034 		case STATUS_SPEED_1000:
   16035 			speed = 1000;
   16036 			break;
   16037 		default:
   16038 			device_printf(sc->sc_dev, "Unknown speed "
   16039 			    "(status = %08x)\n", status);
   16040 			return -1;
   16041 		}
   16042 
   16043 		/* Rx Packet Buffer Allocation size (KB) */
   16044 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16045 
   16046 		/*
   16047 		 * Determine the maximum latency tolerated by the device.
   16048 		 *
   16049 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16050 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16051 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16052 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16053 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16054 		 */
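          		/*
          		 * lat_ns below is the time, in ns, for the Rx packet
          		 * buffer (less two maximum-sized frames) to fill at the
          		 * current link speed: bytes * 8 * 1000 / speed-in-Mbps.
          		 */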
   16055 		lat_ns = ((int64_t)rxa * 1024 -
   16056 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16057 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16058 		if (lat_ns < 0)
   16059 			lat_ns = 0;
   16060 		else
   16061 			lat_ns /= speed;
   16062 		value = lat_ns;
   16063 
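          		/*
          		 * Worked example (illustrative values): lat_ns = 66000
          		 * encodes as scale 2 (2^10 ns units) and value 65, since
          		 * howmany(howmany(66000, 32), 32) = 65 and
          		 * 65 * 2^10 ns >= 66000 ns.
          		 */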
   16064 		while (value > LTRV_VALUE) {
    16065 			scale++;
   16066 			value = howmany(value, __BIT(5));
   16067 		}
   16068 		if (scale > LTRV_SCALE_MAX) {
   16069 			printf("%s: Invalid LTR latency scale %d\n",
   16070 			    device_xname(sc->sc_dev), scale);
   16071 			return -1;
   16072 		}
   16073 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16074 
   16075 		/* Determine the maximum latency tolerated by the platform */
   16076 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16077 		    WM_PCI_LTR_CAP_LPT);
   16078 		max_snoop = preg & 0xffff;
   16079 		max_nosnoop = preg >> 16;
   16080 
   16081 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16082 
   16083 		if (lat_enc > max_ltr_enc) {
   16084 			lat_enc = max_ltr_enc;
   16085 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16086 			    * PCI_LTR_SCALETONS(
   16087 				    __SHIFTOUT(lat_enc,
   16088 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16089 		}
   16090 
   16091 		if (lat_ns) {
   16092 			lat_ns *= speed * 1000;
   16093 			lat_ns /= 8;
   16094 			lat_ns /= 1000000000;
   16095 			obff_hwm = (int32_t)(rxa - lat_ns);
   16096 		}
   16097 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16098 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    16099 			    " (rxa = %d, lat_ns = %d)\n",
   16100 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16101 			return -1;
   16102 		}
   16103 	}
    16104 	/* Snoop and No-Snoop latencies are the same */
   16105 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16106 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16107 
   16108 	/* Set OBFF high water mark */
   16109 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16110 	reg |= obff_hwm;
   16111 	CSR_WRITE(sc, WMREG_SVT, reg);
   16112 
   16113 	/* Enable OBFF */
   16114 	reg = CSR_READ(sc, WMREG_SVCR);
   16115 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16116 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16117 
   16118 	return 0;
   16119 }
   16120 
   16121 /*
   16122  * I210 Errata 25 and I211 Errata 10
   16123  * Slow System Clock.
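           *
           * If the internal PHY's PLL reads back as unconfigured, directly
           * reset the PHY, rewrite the iNVM autoload word with
           * INVM_PLL_WO_VAL set, and bounce the device through D3hot
           * (presumably to rerun the autoload), retrying up to
           * WM_MAX_PLL_TRIES times.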
   16124  */
   16125 static int
   16126 wm_pll_workaround_i210(struct wm_softc *sc)
   16127 {
   16128 	uint32_t mdicnfg, wuc;
   16129 	uint32_t reg;
   16130 	pcireg_t pcireg;
   16131 	uint32_t pmreg;
   16132 	uint16_t nvmword, tmp_nvmword;
   16133 	uint16_t phyval;
   16134 	bool wa_done = false;
   16135 	int i, rv = 0;
   16136 
   16137 	/* Get Power Management cap offset */
   16138 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16139 	    &pmreg, NULL) == 0)
   16140 		return -1;
   16141 
   16142 	/* Save WUC and MDICNFG registers */
   16143 	wuc = CSR_READ(sc, WMREG_WUC);
   16144 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16145 
   16146 	reg = mdicnfg & ~MDICNFG_DEST;
   16147 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16148 
   16149 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16150 		nvmword = INVM_DEFAULT_AL;
   16151 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16152 
   16153 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16154 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16155 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16156 
   16157 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16158 			rv = 0;
   16159 			break; /* OK */
   16160 		} else
   16161 			rv = -1;
   16162 
   16163 		wa_done = true;
   16164 		/* Directly reset the internal PHY */
   16165 		reg = CSR_READ(sc, WMREG_CTRL);
   16166 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16167 
   16168 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16169 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16170 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16171 
   16172 		CSR_WRITE(sc, WMREG_WUC, 0);
   16173 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16174 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16175 
   16176 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16177 		    pmreg + PCI_PMCSR);
   16178 		pcireg |= PCI_PMCSR_STATE_D3;
   16179 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16180 		    pmreg + PCI_PMCSR, pcireg);
   16181 		delay(1000);
   16182 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16183 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16184 		    pmreg + PCI_PMCSR, pcireg);
   16185 
   16186 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16187 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16188 
   16189 		/* Restore WUC register */
   16190 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16191 	}
   16192 
   16193 	/* Restore MDICNFG setting */
   16194 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16195 	if (wa_done)
   16196 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16197 	return rv;
   16198 }
   16199 
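          /*
           * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: ungate
           * the side clock and disable IOSF sideband clock gating and clock
           * requests.  (The rationale is inferred from the function and bit
           * names.)
           */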
   16200 static void
   16201 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16202 {
   16203 	uint32_t reg;
   16204 
   16205 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16206 		device_xname(sc->sc_dev), __func__));
   16207 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16208 	    || (sc->sc_type == WM_T_PCH_CNP));
   16209 
   16210 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16211 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16212 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16213 
   16214 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16215 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16216 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16217 }
   16218