      1 /*	$NetBSD: if_wm.c,v 1.606 2018/12/17 04:14:40 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Tx multiqueue improvements (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.606 2018/12/17 04:14:40 knakahara Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
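
/*
 * Illustrative sketch (not driver code): DPRINTF() takes a debug-mask
 * bit and a parenthesized printf(9) argument list, so the extra
 * parentheses around the format and arguments are required.  The
 * message and the "link" variable below are hypothetical.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed to %d\n",
		device_xname(sc->sc_dev), link));
#endif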
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
    173  * Maximum number of interrupts that this driver can use.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
    203  * segments, m_defrag() is called to reduce the number of segments.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    219 
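/*
 * A minimal sketch (illustrative only) of the mask-based ring macros
 * above: because WM_NTXDESC() is a power of two, "(x + 1) & (n - 1)"
 * equals "(x + 1) % n" without a division, so the index wraps back to
 * zero past the last descriptor.
 */
#if 0
	int idx = WM_NTXDESC(txq) - 1;	/* last descriptor in the ring */
	idx = WM_NEXTTX(txq, idx);	/* wraps around to 0 */
	KASSERT(idx == 0);
#endif
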
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
    232  * Receive descriptor list size.  We have one Rx buffer per normal-
    233  * sized packet; a full-sized jumbo packet consumes 5 Rx buffers.
    234  * We allocate 256 receive descriptors, each with a 2k buffer
    235  * (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
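
/*
 * Illustrative note: each limit above is guarded by #ifndef, so it can
 * be overridden at build time, e.g. from a kernel config via options(4)
 * (assuming the option is declared for opt_if_wm.h; the values below
 * are hypothetical):
 *
 *	options 	WM_RX_PROCESS_LIMIT_DEFAULT=200
 *	options 	WM_RX_INTR_PROCESS_LIMIT_DEFAULT=16
 */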
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
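
/*
 * Illustrative sketch of how the token-pasting macros above fit
 * together: WM_Q_EVCNT_DEFINE(txq, txdw) declares a name buffer (sized
 * by the literal string "qname##XX##evname", long enough for any
 * qname + 2-digit queue number + evname) plus "struct evcnt
 * txq_ev_txdw", and the attach below registers it as "txq00txdw" for
 * queue 0.  Passing "txq" as the q argument is hypothetical.
 */
#if 0
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, device_xname(sc->sc_dev));
	/* ... and at detach time: */
	WM_Q_EVCNT_DETACH(txq, txdw, txq, 0);
#endif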
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
    335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    361 	 * CPUs.  This pcq mediates between those CPUs without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
    366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
    417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
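
/*
 * Illustrative sketch of the op-vector contract shared by wm_phyop and
 * wm_nvmop: take the hardware semaphore with acquire() (0 on success),
 * use the *_locked accessors, then release().  Error handling is
 * elided; the PHY address 1 and "reg" are hypothetical.
 */
#if 0
	uint16_t val;

	if (sc->phy.acquire(sc) == 0) {
		sc->phy.readreg_locked(sc->sc_dev, 1, reg, &val);
		sc->phy.release(sc);
	}
#endif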
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_flowflags;		/* 802.3x flow control flags */
    518 	int sc_align_tweak;
    519 
    520 	void *sc_ihs[WM_MAX_NINTR];	/*
    521 					 * interrupt cookie.
    522 					 * - legacy and msi use sc_ihs[0] only
    523 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    524 					 */
    525 	pci_intr_handle_t *sc_intrs;	/*
    526 					 * legacy and msi use sc_intrs[0] only
    527 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    528 					 */
    529 	int sc_nintrs;			/* number of interrupts */
    530 
    531 	int sc_link_intr_idx;		/* index of MSI-X tables */
    532 
    533 	callout_t sc_tick_ch;		/* tick callout */
    534 	bool sc_core_stopping;
    535 
    536 	int sc_nvm_ver_major;
    537 	int sc_nvm_ver_minor;
    538 	int sc_nvm_ver_build;
    539 	int sc_nvm_addrbits;		/* NVM address bits */
    540 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    541 	int sc_ich8_flash_base;
    542 	int sc_ich8_flash_bank_size;
    543 	int sc_nvm_k1_enabled;
    544 
    545 	int sc_nqueues;
    546 	struct wm_queue *sc_queue;
    547 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    548 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    549 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    550 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    551 
    552 	int sc_affinity_offset;
    553 
    554 #ifdef WM_EVENT_COUNTERS
    555 	/* Event counters. */
    556 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    557 
    558 	/* WM_T_82542_2_1 only */
    559 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    560 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    561 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    564 #endif /* WM_EVENT_COUNTERS */
    565 
    566 	/* This variable is used only on the 82547. */
    567 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    568 
    569 	uint32_t sc_ctrl;		/* prototype CTRL register */
    570 #if 0
    571 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    572 #endif
    573 	uint32_t sc_icr;		/* prototype interrupt bits */
    574 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    575 	uint32_t sc_tctl;		/* prototype TCTL register */
    576 	uint32_t sc_rctl;		/* prototype RCTL register */
    577 	uint32_t sc_txcw;		/* prototype TXCW register */
    578 	uint32_t sc_tipg;		/* prototype TIPG register */
    579 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    580 	uint32_t sc_pba;		/* prototype PBA register */
    581 
    582 	int sc_tbi_linkup;		/* TBI link status */
    583 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    584 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    585 
    586 	int sc_mchash_type;		/* multicast filter offset */
    587 
    588 	krndsource_t rnd_source;	/* random source */
    589 
    590 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    591 
    592 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    593 	kmutex_t *sc_ich_phymtx;	/*
    594 					 * 82574/82583/ICH/PCH specific PHY
    595 					 * mutex. For 82574/82583, the mutex
    596 					 * is used for both PHY and NVM.
    597 					 */
    598 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    599 
    600 	struct wm_phyop phy;
    601 	struct wm_nvmop nvm;
    602 };
    603 
    604 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    605 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    606 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    607 
    608 #define	WM_RXCHAIN_RESET(rxq)						\
    609 do {									\
    610 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    611 	*(rxq)->rxq_tailp = NULL;					\
    612 	(rxq)->rxq_len = 0;						\
    613 } while (/*CONSTCOND*/0)
    614 
    615 #define	WM_RXCHAIN_LINK(rxq, m)						\
    616 do {									\
    617 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    618 	(rxq)->rxq_tailp = &(m)->m_next;				\
    619 } while (/*CONSTCOND*/0)
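
/*
 * Illustrative sketch (hypothetical mbufs m1/m2): after a reset, each
 * WM_RXCHAIN_LINK() appends one Rx buffer's mbuf through rxq_tailp, so
 * a jumbo packet spanning several buffers ends up as a single m_next
 * chain headed by rxq_head.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
#endif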
    620 
    621 #ifdef WM_EVENT_COUNTERS
    622 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    623 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)			\
    626 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    627 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    628 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    629 #else /* !WM_EVENT_COUNTERS */
    630 #define	WM_EVCNT_INCR(ev)	/* nothing */
    631 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    632 
    633 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    634 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    635 #endif /* !WM_EVENT_COUNTERS */
    636 
    637 #define	CSR_READ(sc, reg)						\
    638 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    639 #define	CSR_WRITE(sc, reg, val)						\
    640 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    641 #define	CSR_WRITE_FLUSH(sc)						\
    642 	(void) CSR_READ((sc), WMREG_STATUS)
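
/*
 * Illustrative sketch: CSR writes are posted, so CSR_WRITE_FLUSH() (a
 * dummy STATUS read) forces the preceding write out to the device
 * before the delay starts.  WMREG_CTRL and CTRL_SLU come from
 * if_wmreg.h; the sequence itself is hypothetical.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);
	delay(10);
#endif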
    643 
    644 #define ICH8_FLASH_READ32(sc, reg)					\
    645 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    646 	    (reg) + sc->sc_flashreg_offset)
    647 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    648 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    649 	    (reg) + sc->sc_flashreg_offset, (data))
    650 
    651 #define ICH8_FLASH_READ16(sc, reg)					\
    652 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset)
    654 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    655 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    656 	    (reg) + sc->sc_flashreg_offset, (data))
    657 
    658 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    659 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    660 
    661 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    662 #define	WM_CDTXADDR_HI(txq, x)						\
    663 	(sizeof(bus_addr_t) == 8 ?					\
    664 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    665 
    666 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    667 #define	WM_CDRXADDR_HI(rxq, x)						\
    668 	(sizeof(bus_addr_t) == 8 ?					\
    669 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
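
/*
 * A minimal sketch of how the LO/HI halves above are consumed: a
 * descriptor ring base is programmed as two 32-bit register writes
 * (assuming the per-queue WMREG_TDBAL()/WMREG_TDBAH() macros from
 * if_wmreg.h; queue 0 is illustrative).  On systems with a 32-bit
 * bus_addr_t the HI half is simply 0.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif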
    670 
    671 /*
    672  * Register read/write functions.
    673  * Other than CSR_{READ|WRITE}().
    674  */
    675 #if 0
    676 static inline uint32_t wm_io_read(struct wm_softc *, int);
    677 #endif
    678 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    679 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    680     uint32_t, uint32_t);
    681 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    682 
    683 /*
    684  * Descriptor sync/init functions.
    685  */
    686 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    687 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    688 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    689 
    690 /*
    691  * Device driver interface functions and commonly used functions.
    692  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    693  */
    694 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    695 static int	wm_match(device_t, cfdata_t, void *);
    696 static void	wm_attach(device_t, device_t, void *);
    697 static int	wm_detach(device_t, int);
    698 static bool	wm_suspend(device_t, const pmf_qual_t *);
    699 static bool	wm_resume(device_t, const pmf_qual_t *);
    700 static void	wm_watchdog(struct ifnet *);
    701 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    702     uint16_t *);
    703 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    704     uint16_t *);
    705 static void	wm_tick(void *);
    706 static int	wm_ifflags_cb(struct ethercom *);
    707 static int	wm_ioctl(struct ifnet *, u_long, void *);
    708 /* MAC address related */
    709 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    710 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    711 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    712 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    713 static void	wm_set_filter(struct wm_softc *);
    714 /* Reset and init related */
    715 static void	wm_set_vlan(struct wm_softc *);
    716 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    717 static void	wm_get_auto_rd_done(struct wm_softc *);
    718 static void	wm_lan_init_done(struct wm_softc *);
    719 static void	wm_get_cfg_done(struct wm_softc *);
    720 static void	wm_phy_post_reset(struct wm_softc *);
    721 static int	wm_write_smbus_addr(struct wm_softc *);
    722 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    723 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    724 static void	wm_initialize_hardware_bits(struct wm_softc *);
    725 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    726 static int	wm_reset_phy(struct wm_softc *);
    727 static void	wm_flush_desc_rings(struct wm_softc *);
    728 static void	wm_reset(struct wm_softc *);
    729 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    730 static void	wm_rxdrain(struct wm_rxqueue *);
    731 static void	wm_init_rss(struct wm_softc *);
    732 static void	wm_adjust_qnum(struct wm_softc *, int);
    733 static inline bool	wm_is_using_msix(struct wm_softc *);
    734 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    735 static int	wm_softint_establish(struct wm_softc *, int, int);
    736 static int	wm_setup_legacy(struct wm_softc *);
    737 static int	wm_setup_msix(struct wm_softc *);
    738 static int	wm_init(struct ifnet *);
    739 static int	wm_init_locked(struct ifnet *);
    740 static void	wm_unset_stopping_flags(struct wm_softc *);
    741 static void	wm_set_stopping_flags(struct wm_softc *);
    742 static void	wm_stop(struct ifnet *, int);
    743 static void	wm_stop_locked(struct ifnet *, int);
    744 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    745 static void	wm_82547_txfifo_stall(void *);
    746 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    747 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    748 /* DMA related */
    749 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    752 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    753     struct wm_txqueue *);
    754 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    755 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    756 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    757     struct wm_rxqueue *);
    758 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    759 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    761 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    762 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    763 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    764 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    765     struct wm_txqueue *);
    766 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    767     struct wm_rxqueue *);
    768 static int	wm_alloc_txrx_queues(struct wm_softc *);
    769 static void	wm_free_txrx_queues(struct wm_softc *);
    770 static int	wm_init_txrx_queues(struct wm_softc *);
    771 /* Start */
    772 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    773     struct wm_txsoft *, uint32_t *, uint8_t *);
    774 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    775 static void	wm_start(struct ifnet *);
    776 static void	wm_start_locked(struct ifnet *);
    777 static int	wm_transmit(struct ifnet *, struct mbuf *);
    778 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    779 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    780     bool);
    781 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    782     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    783 static void	wm_nq_start(struct ifnet *);
    784 static void	wm_nq_start_locked(struct ifnet *);
    785 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    786 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    787 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    788     bool);
    789 static void	wm_deferred_start_locked(struct wm_txqueue *);
    790 static void	wm_handle_queue(void *);
    791 /* Interrupt */
    792 static bool	wm_txeof(struct wm_txqueue *, u_int);
    793 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    794 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    795 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    796 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    797 static void	wm_linkintr(struct wm_softc *, uint32_t);
    798 static int	wm_intr_legacy(void *);
    799 static inline void	wm_txrxintr_disable(struct wm_queue *);
    800 static inline void	wm_txrxintr_enable(struct wm_queue *);
    801 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    802 static int	wm_txrxintr_msix(void *);
    803 static int	wm_linkintr_msix(void *);
    804 
    805 /*
    806  * Media related.
    807  * GMII, SGMII, TBI, SERDES and SFP.
    808  */
    809 /* Common */
    810 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    811 /* GMII related */
    812 static void	wm_gmii_reset(struct wm_softc *);
    813 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    814 static int	wm_get_phy_id_82575(struct wm_softc *);
    815 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    816 static int	wm_gmii_mediachange(struct ifnet *);
    817 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    818 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    819 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    820 static int	wm_gmii_i82543_readreg(device_t, int, int);
    821 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    822 static int	wm_gmii_mdic_readreg(device_t, int, int);
    823 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    824 static int	wm_gmii_i82544_readreg(device_t, int, int);
    825 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    826 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    827 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    828 static int	wm_gmii_i80003_readreg(device_t, int, int);
    829 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    830 static int	wm_gmii_bm_readreg(device_t, int, int);
    831 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    832 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    833 static int	wm_gmii_hv_readreg(device_t, int, int);
    834 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    835 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    836 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    837 static int	wm_gmii_82580_readreg(device_t, int, int);
    838 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    839 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    840 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    841 static void	wm_gmii_statchg(struct ifnet *);
    842 /*
    843  * Kumeran related (80003, ICH* and PCH*).
    844  * These functions are not for accessing MII registers but for accessing
    845  * Kumeran-specific registers.
    846  */
    847 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    848 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    849 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    850 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    851 /* SGMII */
    852 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    853 static int	wm_sgmii_readreg(device_t, int, int);
    854 static void	wm_sgmii_writereg(device_t, int, int, int);
    855 /* TBI related */
    856 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    857 static void	wm_tbi_mediainit(struct wm_softc *);
    858 static int	wm_tbi_mediachange(struct ifnet *);
    859 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    860 static int	wm_check_for_link(struct wm_softc *);
    861 static void	wm_tbi_tick(struct wm_softc *);
    862 /* SERDES related */
    863 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    864 static int	wm_serdes_mediachange(struct ifnet *);
    865 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    866 static void	wm_serdes_tick(struct wm_softc *);
    867 /* SFP related */
    868 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    869 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    870 
    871 /*
    872  * NVM related.
    873  * Microwire, SPI (w/wo EERD) and Flash.
    874  */
    875 /* Misc functions */
    876 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    877 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    878 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    879 /* Microwire */
    880 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    881 /* SPI */
    882 static int	wm_nvm_ready_spi(struct wm_softc *);
    883 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    884 /* Using with EERD */
    885 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    886 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    887 /* Flash */
    888 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    889     unsigned int *);
    890 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    891 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    892 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    893     uint32_t *);
    894 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    895 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    896 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    897 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    898 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    899 /* iNVM */
    900 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    901 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    902 /* Lock, detecting NVM type, validate checksum and read */
    903 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    904 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    905 static int	wm_nvm_validate_checksum(struct wm_softc *);
    906 static void	wm_nvm_version_invm(struct wm_softc *);
    907 static void	wm_nvm_version(struct wm_softc *);
    908 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    909 
    910 /*
    911  * Hardware semaphores.
    912  * Very complex...
    913  */
    914 static int	wm_get_null(struct wm_softc *);
    915 static void	wm_put_null(struct wm_softc *);
    916 static int	wm_get_eecd(struct wm_softc *);
    917 static void	wm_put_eecd(struct wm_softc *);
    918 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    919 static void	wm_put_swsm_semaphore(struct wm_softc *);
    920 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    921 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    922 static int	wm_get_nvm_80003(struct wm_softc *);
    923 static void	wm_put_nvm_80003(struct wm_softc *);
    924 static int	wm_get_nvm_82571(struct wm_softc *);
    925 static void	wm_put_nvm_82571(struct wm_softc *);
    926 static int	wm_get_phy_82575(struct wm_softc *);
    927 static void	wm_put_phy_82575(struct wm_softc *);
    928 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    929 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    930 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    931 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    932 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    933 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    934 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    935 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    936 
    937 /*
    938  * Management mode and power management related subroutines.
    939  * BMC, AMT, suspend/resume and EEE.
    940  */
    941 #if 0
    942 static int	wm_check_mng_mode(struct wm_softc *);
    943 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    944 static int	wm_check_mng_mode_82574(struct wm_softc *);
    945 static int	wm_check_mng_mode_generic(struct wm_softc *);
    946 #endif
    947 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    948 static bool	wm_phy_resetisblocked(struct wm_softc *);
    949 static void	wm_get_hw_control(struct wm_softc *);
    950 static void	wm_release_hw_control(struct wm_softc *);
    951 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    952 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    953 static void	wm_init_manageability(struct wm_softc *);
    954 static void	wm_release_manageability(struct wm_softc *);
    955 static void	wm_get_wakeup(struct wm_softc *);
    956 static int	wm_ulp_disable(struct wm_softc *);
    957 static void	wm_enable_phy_wakeup(struct wm_softc *);
    958 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    959 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    960 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    961 static void	wm_enable_wakeup(struct wm_softc *);
    962 static void	wm_disable_aspm(struct wm_softc *);
    963 /* LPLU (Low Power Link Up) */
    964 static void	wm_lplu_d0_disable(struct wm_softc *);
    965 /* EEE */
    966 static void	wm_set_eee_i350(struct wm_softc *);
    967 
    968 /*
    969  * Workarounds (mainly PHY related).
    970  * Basically, PHY's workarounds are in the PHY drivers.
    971  */
    972 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    973 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    974 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    975 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    976 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    977 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    978 static int	wm_k1_workaround_lv(struct wm_softc *);
    979 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    980 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    981 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    982 static void	wm_reset_init_script_82575(struct wm_softc *);
    983 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    984 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    985 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    986 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    987 static void	wm_pll_workaround_i210(struct wm_softc *);
    988 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    989 
    990 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    991     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    992 
    993 /*
    994  * Devices supported by this driver.
    995  */
    996 static const struct wm_product {
    997 	pci_vendor_id_t		wmp_vendor;
    998 	pci_product_id_t	wmp_product;
    999 	const char		*wmp_name;
   1000 	wm_chip_type		wmp_type;
   1001 	uint32_t		wmp_flags;
   1002 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1003 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1004 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1005 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1006 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1007 } wm_products[] = {
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1009 	  "Intel i82542 1000BASE-X Ethernet",
   1010 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1013 	  "Intel i82543GC 1000BASE-X Ethernet",
   1014 	  WM_T_82543,		WMP_F_FIBER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1017 	  "Intel i82543GC 1000BASE-T Ethernet",
   1018 	  WM_T_82543,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1021 	  "Intel i82544EI 1000BASE-T Ethernet",
   1022 	  WM_T_82544,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1025 	  "Intel i82544EI 1000BASE-X Ethernet",
   1026 	  WM_T_82544,		WMP_F_FIBER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1029 	  "Intel i82544GC 1000BASE-T Ethernet",
   1030 	  WM_T_82544,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1033 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1034 	  WM_T_82544,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1037 	  "Intel i82540EM 1000BASE-T Ethernet",
   1038 	  WM_T_82540,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1041 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1042 	  WM_T_82540,		WMP_F_COPPER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1045 	  "Intel i82540EP 1000BASE-T Ethernet",
   1046 	  WM_T_82540,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1049 	  "Intel i82540EP 1000BASE-T Ethernet",
   1050 	  WM_T_82540,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1053 	  "Intel i82540EP 1000BASE-T Ethernet",
   1054 	  WM_T_82540,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1057 	  "Intel i82545EM 1000BASE-T Ethernet",
   1058 	  WM_T_82545,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1061 	  "Intel i82545GM 1000BASE-T Ethernet",
   1062 	  WM_T_82545_3,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1065 	  "Intel i82545GM 1000BASE-X Ethernet",
   1066 	  WM_T_82545_3,		WMP_F_FIBER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1069 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1070 	  WM_T_82545_3,		WMP_F_SERDES },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1073 	  "Intel i82546EB 1000BASE-T Ethernet",
   1074 	  WM_T_82546,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1077 	  "Intel i82546EB 1000BASE-T Ethernet",
   1078 	  WM_T_82546,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1081 	  "Intel i82545EM 1000BASE-X Ethernet",
   1082 	  WM_T_82545,		WMP_F_FIBER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1085 	  "Intel i82546EB 1000BASE-X Ethernet",
   1086 	  WM_T_82546,		WMP_F_FIBER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1089 	  "Intel i82546GB 1000BASE-T Ethernet",
   1090 	  WM_T_82546_3,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1093 	  "Intel i82546GB 1000BASE-X Ethernet",
   1094 	  WM_T_82546_3,		WMP_F_FIBER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1097 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1098 	  WM_T_82546_3,		WMP_F_SERDES },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1101 	  "i82546GB quad-port Gigabit Ethernet",
   1102 	  WM_T_82546_3,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1105 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1106 	  WM_T_82546_3,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1109 	  "Intel PRO/1000MT (82546GB)",
   1110 	  WM_T_82546_3,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1113 	  "Intel i82541EI 1000BASE-T Ethernet",
   1114 	  WM_T_82541,		WMP_F_COPPER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1117 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1118 	  WM_T_82541,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1121 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1122 	  WM_T_82541,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1125 	  "Intel i82541ER 1000BASE-T Ethernet",
   1126 	  WM_T_82541_2,		WMP_F_COPPER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1129 	  "Intel i82541GI 1000BASE-T Ethernet",
   1130 	  WM_T_82541_2,		WMP_F_COPPER },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1133 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1134 	  WM_T_82541_2,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1137 	  "Intel i82541PI 1000BASE-T Ethernet",
   1138 	  WM_T_82541_2,		WMP_F_COPPER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1141 	  "Intel i82547EI 1000BASE-T Ethernet",
   1142 	  WM_T_82547,		WMP_F_COPPER },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1145 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1146 	  WM_T_82547,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1149 	  "Intel i82547GI 1000BASE-T Ethernet",
   1150 	  WM_T_82547_2,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1153 	  "Intel PRO/1000 PT (82571EB)",
   1154 	  WM_T_82571,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1157 	  "Intel PRO/1000 PF (82571EB)",
   1158 	  WM_T_82571,		WMP_F_FIBER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1161 	  "Intel PRO/1000 PB (82571EB)",
   1162 	  WM_T_82571,		WMP_F_SERDES },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1165 	  "Intel PRO/1000 QT (82571EB)",
   1166 	  WM_T_82571,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1169 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1170 	  WM_T_82571,		WMP_F_COPPER, },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1173 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1174 	  WM_T_82571,		WMP_F_COPPER, },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1177 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1178 	  WM_T_82571,		WMP_F_SERDES, },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1181 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1182 	  WM_T_82571,		WMP_F_SERDES, },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1185 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1186 	  WM_T_82571,		WMP_F_FIBER, },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1189 	  "Intel i82572EI 1000baseT Ethernet",
   1190 	  WM_T_82572,		WMP_F_COPPER },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1193 	  "Intel i82572EI 1000baseX Ethernet",
   1194 	  WM_T_82572,		WMP_F_FIBER },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1197 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1198 	  WM_T_82572,		WMP_F_SERDES },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1201 	  "Intel i82572EI 1000baseT Ethernet",
   1202 	  WM_T_82572,		WMP_F_COPPER },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1205 	  "Intel i82573E",
   1206 	  WM_T_82573,		WMP_F_COPPER },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1209 	  "Intel i82573E IAMT",
   1210 	  WM_T_82573,		WMP_F_COPPER },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1213 	  "Intel i82573L Gigabit Ethernet",
   1214 	  WM_T_82573,		WMP_F_COPPER },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1217 	  "Intel i82574L",
   1218 	  WM_T_82574,		WMP_F_COPPER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1221 	  "Intel i82574L",
   1222 	  WM_T_82574,		WMP_F_COPPER },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1225 	  "Intel i82583V",
   1226 	  WM_T_82583,		WMP_F_COPPER },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1229 	  "i80003 dual 1000baseT Ethernet",
   1230 	  WM_T_80003,		WMP_F_COPPER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1233 	  "i80003 dual 1000baseX Ethernet",
   1234 	  WM_T_80003,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1237 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1238 	  WM_T_80003,		WMP_F_SERDES },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1241 	  "Intel i80003 1000baseT Ethernet",
   1242 	  WM_T_80003,		WMP_F_COPPER },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1245 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1246 	  WM_T_80003,		WMP_F_SERDES },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1249 	  "Intel i82801H (M_AMT) LAN Controller",
   1250 	  WM_T_ICH8,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1252 	  "Intel i82801H (AMT) LAN Controller",
   1253 	  WM_T_ICH8,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1255 	  "Intel i82801H LAN Controller",
   1256 	  WM_T_ICH8,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1258 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1259 	  WM_T_ICH8,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1261 	  "Intel i82801H (M) LAN Controller",
   1262 	  WM_T_ICH8,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1264 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1265 	  WM_T_ICH8,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1267 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1268 	  WM_T_ICH8,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1270 	  "82567V-3 LAN Controller",
   1271 	  WM_T_ICH8,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1273 	  "82801I (AMT) LAN Controller",
   1274 	  WM_T_ICH9,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1276 	  "82801I 10/100 LAN Controller",
   1277 	  WM_T_ICH9,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1279 	  "82801I (G) 10/100 LAN Controller",
   1280 	  WM_T_ICH9,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1282 	  "82801I (GT) 10/100 LAN Controller",
   1283 	  WM_T_ICH9,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1285 	  "82801I (C) LAN Controller",
   1286 	  WM_T_ICH9,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1288 	  "82801I mobile LAN Controller",
   1289 	  WM_T_ICH9,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1291 	  "82801I mobile (V) LAN Controller",
   1292 	  WM_T_ICH9,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1294 	  "82801I mobile (AMT) LAN Controller",
   1295 	  WM_T_ICH9,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1297 	  "82567LM-4 LAN Controller",
   1298 	  WM_T_ICH9,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1300 	  "82567LM-2 LAN Controller",
   1301 	  WM_T_ICH10,		WMP_F_COPPER },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1303 	  "82567LF-2 LAN Controller",
   1304 	  WM_T_ICH10,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1306 	  "82567LM-3 LAN Controller",
   1307 	  WM_T_ICH10,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1309 	  "82567LF-3 LAN Controller",
   1310 	  WM_T_ICH10,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1312 	  "82567V-2 LAN Controller",
   1313 	  WM_T_ICH10,		WMP_F_COPPER },
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1315 	  "82567V-3? LAN Controller",
   1316 	  WM_T_ICH10,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1318 	  "HANKSVILLE LAN Controller",
   1319 	  WM_T_ICH10,		WMP_F_COPPER },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1321 	  "PCH LAN (82577LM) Controller",
   1322 	  WM_T_PCH,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1324 	  "PCH LAN (82577LC) Controller",
   1325 	  WM_T_PCH,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1327 	  "PCH LAN (82578DM) Controller",
   1328 	  WM_T_PCH,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1330 	  "PCH LAN (82578DC) Controller",
   1331 	  WM_T_PCH,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1333 	  "PCH2 LAN (82579LM) Controller",
   1334 	  WM_T_PCH2,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1336 	  "PCH2 LAN (82579V) Controller",
   1337 	  WM_T_PCH2,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1339 	  "82575EB dual-1000baseT Ethernet",
   1340 	  WM_T_82575,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1342 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1343 	  WM_T_82575,		WMP_F_SERDES },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1345 	  "82575GB quad-1000baseT Ethernet",
   1346 	  WM_T_82575,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1348 	  "82575GB quad-1000baseT Ethernet (PM)",
   1349 	  WM_T_82575,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1351 	  "82576 1000BaseT Ethernet",
   1352 	  WM_T_82576,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1354 	  "82576 1000BaseX Ethernet",
   1355 	  WM_T_82576,		WMP_F_FIBER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1358 	  "82576 gigabit Ethernet (SERDES)",
   1359 	  WM_T_82576,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1362 	  "82576 quad-1000BaseT Ethernet",
   1363 	  WM_T_82576,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1366 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1367 	  WM_T_82576,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1370 	  "82576 gigabit Ethernet",
   1371 	  WM_T_82576,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1377 	  "82576 quad-gigabit Ethernet (SERDES)",
   1378 	  WM_T_82576,		WMP_F_SERDES },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1381 	  "82580 1000BaseT Ethernet",
   1382 	  WM_T_82580,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1384 	  "82580 1000BaseX Ethernet",
   1385 	  WM_T_82580,		WMP_F_FIBER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1388 	  "82580 1000BaseT Ethernet (SERDES)",
   1389 	  WM_T_82580,		WMP_F_SERDES },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1392 	  "82580 gigabit Ethernet (SGMII)",
   1393 	  WM_T_82580,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1395 	  "82580 dual-1000BaseT Ethernet",
   1396 	  WM_T_82580,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1399 	  "82580 quad-1000BaseX Ethernet",
   1400 	  WM_T_82580,		WMP_F_FIBER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1403 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1404 	  WM_T_82580,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1407 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1408 	  WM_T_82580,		WMP_F_SERDES },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1411 	  "DH89XXCC 1000BASE-KX Ethernet",
   1412 	  WM_T_82580,		WMP_F_SERDES },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1415 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1416 	  WM_T_82580,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1419 	  "I350 Gigabit Network Connection",
   1420 	  WM_T_I350,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1423 	  "I350 Gigabit Fiber Network Connection",
   1424 	  WM_T_I350,		WMP_F_FIBER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1427 	  "I350 Gigabit Backplane Connection",
   1428 	  WM_T_I350,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1431 	  "I350 Quad Port Gigabit Ethernet",
   1432 	  WM_T_I350,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1435 	  "I350 Gigabit Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1439 	  "I354 Gigabit Ethernet (KX)",
   1440 	  WM_T_I354,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1443 	  "I354 Gigabit Ethernet (SGMII)",
   1444 	  WM_T_I354,		WMP_F_COPPER },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1447 	  "I354 Gigabit Ethernet (2.5G)",
   1448 	  WM_T_I354,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1451 	  "I210-T1 Ethernet Server Adapter",
   1452 	  WM_T_I210,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1455 	  "I210 Ethernet (Copper OEM)",
   1456 	  WM_T_I210,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1459 	  "I210 Ethernet (Copper IT)",
   1460 	  WM_T_I210,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1463 	  "I210 Ethernet (FLASH less)",
   1464 	  WM_T_I210,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1467 	  "I210 Gigabit Ethernet (Fiber)",
   1468 	  WM_T_I210,		WMP_F_FIBER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1471 	  "I210 Gigabit Ethernet (SERDES)",
   1472 	  WM_T_I210,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1475 	  "I210 Gigabit Ethernet (FLASH less)",
   1476 	  WM_T_I210,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1479 	  "I210 Gigabit Ethernet (SGMII)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1483 	  "I211 Ethernet (COPPER)",
   1484 	  WM_T_I211,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1486 	  "I217 V Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1489 	  "I217 LM Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1492 	  "I218 V Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1495 	  "I218 V Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1498 	  "I218 V Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1501 	  "I218 LM Ethernet Connection",
   1502 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1504 	  "I218 LM Ethernet Connection",
   1505 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1507 	  "I218 LM Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1513 	  "I219 V Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1516 	  "I219 V Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1519 	  "I219 V Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1528 	  "I219 LM Ethernet Connection",
   1529 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1537 	  "I219 V Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1540 	  "I219 V Ethernet Connection",
   1541 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1548 	{ 0,			0,
   1549 	  NULL,
   1550 	  0,			0 },
   1551 };
   1552 
   1553 /*
   1554  * Register read/write functions.
   1555  * Other than CSR_{READ|WRITE}().
   1556  */
   1557 
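        /*
         * The I/O-mapped variants below use the chip's two-register
         * indirect window: the register offset is written at I/O BAR
         * offset 0, and the data is then read or written at offset 4.
         */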
   1558 #if 0 /* Not currently used */
   1559 static inline uint32_t
   1560 wm_io_read(struct wm_softc *sc, int reg)
   1561 {
   1562 
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1564 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1565 }
   1566 #endif
   1567 
   1568 static inline void
   1569 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1570 {
   1571 
   1572 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1573 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1574 }
   1575 
   1576 static inline void
   1577 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1578     uint32_t data)
   1579 {
   1580 	uint32_t regval;
   1581 	int i;
   1582 
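        	/*
        	 * Pack the 8-bit payload and the target offset into one
        	 * command word, issue it, then poll until the controller
        	 * reports READY (or the poll times out).
        	 */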
   1583 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1584 
   1585 	CSR_WRITE(sc, reg, regval);
   1586 
   1587 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1588 		delay(5);
   1589 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1590 			break;
   1591 	}
   1592 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1593 		aprint_error("%s: WARNING:"
   1594 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1595 		    device_xname(sc->sc_dev), reg);
   1596 	}
   1597 }
   1598 
   1599 static inline void
   1600 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1601 {
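        	/*
        	 * Split the DMA address into the two little-endian 32-bit
        	 * words the descriptor expects; on platforms with a 32-bit
        	 * bus_addr_t the high word is always zero.
        	 */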
   1602 	wa->wa_low = htole32(v & 0xffffffffU);
   1603 	if (sizeof(bus_addr_t) == 8)
   1604 		wa->wa_high = htole32((uint64_t) v >> 32);
   1605 	else
   1606 		wa->wa_high = 0;
   1607 }
   1608 
   1609 /*
   1610  * Descriptor sync/init functions.
   1611  */
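        /*
         * wm_cdtxsync() syncs "num" Tx descriptors starting at "start";
         * a range that wraps past the end of the ring is split into two
         * bus_dmamap_sync() calls.
         */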
   1612 static inline void
   1613 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1614 {
   1615 	struct wm_softc *sc = txq->txq_sc;
   1616 
   1617 	/* If it will wrap around, sync to the end of the ring. */
   1618 	if ((start + num) > WM_NTXDESC(txq)) {
   1619 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1620 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1621 		    (WM_NTXDESC(txq) - start), ops);
   1622 		num -= (WM_NTXDESC(txq) - start);
   1623 		start = 0;
   1624 	}
   1625 
   1626 	/* Now sync whatever is left. */
   1627 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1628 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1629 }
   1630 
   1631 static inline void
   1632 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1633 {
   1634 	struct wm_softc *sc = rxq->rxq_sc;
   1635 
   1636 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1637 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1638 }
   1639 
   1640 static inline void
   1641 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1642 {
   1643 	struct wm_softc *sc = rxq->rxq_sc;
   1644 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1645 	struct mbuf *m = rxs->rxs_mbuf;
   1646 
   1647 	/*
   1648 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1649 	 * so that the payload after the Ethernet header is aligned
   1650 	 * to a 4-byte boundary.
   1651 	 *
   1652 	 * XXX BRAINDAMAGE ALERT!
   1653 	 * The stupid chip uses the same size for every buffer, which
   1654 	 * is set in the Receive Control register.  We are using the 2K
   1655 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1656 	 * reason, we can't "scoot" packets longer than the standard
   1657 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1658 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1659 	 * the upper layer copy the headers.
   1660 	 */
   1661 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1662 
   1663 	if (sc->sc_type == WM_T_82574) {
   1664 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1665 		rxd->erx_data.erxd_addr =
   1666 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1667 		rxd->erx_data.erxd_dd = 0;
   1668 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1669 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1670 
   1671 		rxd->nqrx_data.nrxd_paddr =
   1672 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1673 		/* Currently, split header is not supported. */
   1674 		rxd->nqrx_data.nrxd_haddr = 0;
   1675 	} else {
   1676 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1677 
   1678 		wm_set_dma_addr(&rxd->wrx_addr,
   1679 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1680 		rxd->wrx_len = 0;
   1681 		rxd->wrx_cksum = 0;
   1682 		rxd->wrx_status = 0;
   1683 		rxd->wrx_errors = 0;
   1684 		rxd->wrx_special = 0;
   1685 	}
   1686 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1687 
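        	/* Advance the Rx descriptor tail (RDT) to hand the slot to the chip. */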
   1688 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1689 }
   1690 
   1691 /*
   1692  * Device driver interface functions and commonly used functions.
   1693  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1694  */
   1695 
   1696 /* Lookup supported device table */
   1697 static const struct wm_product *
   1698 wm_lookup(const struct pci_attach_args *pa)
   1699 {
   1700 	const struct wm_product *wmp;
   1701 
   1702 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1703 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1704 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1705 			return wmp;
   1706 	}
   1707 	return NULL;
   1708 }
   1709 
   1710 /* The match function (ca_match) */
   1711 static int
   1712 wm_match(device_t parent, cfdata_t cf, void *aux)
   1713 {
   1714 	struct pci_attach_args *pa = aux;
   1715 
   1716 	if (wm_lookup(pa) != NULL)
   1717 		return 1;
   1718 
   1719 	return 0;
   1720 }
   1721 
   1722 /* The attach function (ca_attach) */
   1723 static void
   1724 wm_attach(device_t parent, device_t self, void *aux)
   1725 {
   1726 	struct wm_softc *sc = device_private(self);
   1727 	struct pci_attach_args *pa = aux;
   1728 	prop_dictionary_t dict;
   1729 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1730 	pci_chipset_tag_t pc = pa->pa_pc;
   1731 	int counts[PCI_INTR_TYPE_SIZE];
   1732 	pci_intr_type_t max_type;
   1733 	const char *eetype, *xname;
   1734 	bus_space_tag_t memt;
   1735 	bus_space_handle_t memh;
   1736 	bus_size_t memsize;
   1737 	int memh_valid;
   1738 	int i, error;
   1739 	const struct wm_product *wmp;
   1740 	prop_data_t ea;
   1741 	prop_number_t pn;
   1742 	uint8_t enaddr[ETHER_ADDR_LEN];
   1743 	char buf[256];
   1744 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1745 	pcireg_t preg, memtype;
   1746 	uint16_t eeprom_data, apme_mask;
   1747 	bool force_clear_smbi;
   1748 	uint32_t link_mode;
   1749 	uint32_t reg;
   1750 
   1751 	sc->sc_dev = self;
   1752 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1753 	sc->sc_core_stopping = false;
   1754 
   1755 	wmp = wm_lookup(pa);
   1756 #ifdef DIAGNOSTIC
   1757 	if (wmp == NULL) {
   1758 		printf("\n");
   1759 		panic("wm_attach: impossible");
   1760 	}
   1761 #endif
   1762 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1763 
   1764 	sc->sc_pc = pa->pa_pc;
   1765 	sc->sc_pcitag = pa->pa_tag;
   1766 
   1767 	if (pci_dma64_available(pa))
   1768 		sc->sc_dmat = pa->pa_dmat64;
   1769 	else
   1770 		sc->sc_dmat = pa->pa_dmat;
   1771 
   1772 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1773 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1774 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1775 
   1776 	sc->sc_type = wmp->wmp_type;
   1777 
   1778 	/* Set default function pointers */
   1779 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1780 	sc->phy.release = sc->nvm.release = wm_put_null;
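        	/* PHYs on 82571 and newer need only 100us after reset, not 10ms. */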
   1781 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1782 
   1783 	if (sc->sc_type < WM_T_82543) {
   1784 		if (sc->sc_rev < 2) {
   1785 			aprint_error_dev(sc->sc_dev,
   1786 			    "i82542 must be at least rev. 2\n");
   1787 			return;
   1788 		}
   1789 		if (sc->sc_rev < 3)
   1790 			sc->sc_type = WM_T_82542_2_0;
   1791 	}
   1792 
   1793 	/*
   1794 	 * Disable MSI for Errata:
   1795 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1796 	 *
   1797 	 *  82544: Errata 25
   1798 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1799 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1800 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1801 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1802 	 *
   1803 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1804 	 *
   1805 	 *  82571 & 82572: Errata 63
   1806 	 */
   1807 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1808 	    || (sc->sc_type == WM_T_82572))
   1809 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1810 
   1811 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1812 	    || (sc->sc_type == WM_T_82580)
   1813 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1814 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1815 		sc->sc_flags |= WM_F_NEWQUEUE;
   1816 
   1817 	/* Set device properties (mactype) */
   1818 	dict = device_properties(sc->sc_dev);
   1819 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1820 
   1821 	/*
   1822 	 * Map the device.  All devices support memory-mapped access,
   1823 	 * and it is really required for normal operation.
   1824 	 */
   1825 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1826 	switch (memtype) {
   1827 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1828 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1829 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1830 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1831 		break;
   1832 	default:
   1833 		memh_valid = 0;
   1834 		break;
   1835 	}
   1836 
   1837 	if (memh_valid) {
   1838 		sc->sc_st = memt;
   1839 		sc->sc_sh = memh;
   1840 		sc->sc_ss = memsize;
   1841 	} else {
   1842 		aprint_error_dev(sc->sc_dev,
   1843 		    "unable to map device registers\n");
   1844 		return;
   1845 	}
   1846 
   1847 	/*
   1848 	 * In addition, i82544 and later support I/O mapped indirect
   1849 	 * register access.  It is not desirable (nor supported in
   1850 	 * this driver) to use it for normal operation, though it is
   1851 	 * required to work around bugs in some chip versions.
   1852 	 */
   1853 	if (sc->sc_type >= WM_T_82544) {
   1854 		/* First we have to find the I/O BAR. */
   1855 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1856 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1857 			if (memtype == PCI_MAPREG_TYPE_IO)
   1858 				break;
   1859 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1860 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1861 				i += 4;	/* skip high bits, too */
   1862 		}
   1863 		if (i < PCI_MAPREG_END) {
   1864 			/*
   1865 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   1866 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
   1867 			 * That's not a problem, because newer chips don't
   1868 			 * have this bug.
   1869 			 *
   1870 			 * The i8254x doesn't apparently respond when the
   1871 			 * I/O BAR is 0, which looks somewhat like it's not
   1872 			 * been configured.
   1873 			 */
   1874 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1875 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1876 				aprint_error_dev(sc->sc_dev,
   1877 				    "WARNING: I/O BAR at zero.\n");
   1878 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1879 					0, &sc->sc_iot, &sc->sc_ioh,
   1880 					NULL, &sc->sc_ios) == 0) {
   1881 				sc->sc_flags |= WM_F_IOH_VALID;
   1882 			} else
   1883 				aprint_error_dev(sc->sc_dev,
   1884 				    "WARNING: unable to map I/O space\n");
   1885 		}
   1886 
   1887 	}
   1888 
   1889 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1890 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1891 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1892 	if (sc->sc_type < WM_T_82542_2_1)
   1893 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1894 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1895 
   1896 	/* power up chip */
   1897 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1898 	    && error != EOPNOTSUPP) {
   1899 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1900 		return;
   1901 	}
   1902 
   1903 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1904 	/*
   1905 	 *  Don't use MSI-X if we can use only one queue, in order to
   1906 	 * save interrupt resources.
   1907 	 */
   1908 	if (sc->sc_nqueues > 1) {
   1909 		max_type = PCI_INTR_TYPE_MSIX;
   1910 		/*
   1911 		 *  The 82583 has an MSI-X capability in its PCI configuration
   1912 		 * space but doesn't actually support MSI-X. At least the
   1913 		 * documentation doesn't say anything about MSI-X.
   1914 		 */
   1915 		counts[PCI_INTR_TYPE_MSIX]
   1916 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1917 	} else {
   1918 		max_type = PCI_INTR_TYPE_MSI;
   1919 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1920 	}
   1921 
   1922 	/* Allocation settings */
   1923 	counts[PCI_INTR_TYPE_MSI] = 1;
   1924 	counts[PCI_INTR_TYPE_INTX] = 1;
   1925 	/* overridden by disable flags */
   1926 	if (wm_disable_msi != 0) {
   1927 		counts[PCI_INTR_TYPE_MSI] = 0;
   1928 		if (wm_disable_msix != 0) {
   1929 			max_type = PCI_INTR_TYPE_INTX;
   1930 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1931 		}
   1932 	} else if (wm_disable_msix != 0) {
   1933 		max_type = PCI_INTR_TYPE_MSI;
   1934 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1935 	}
   1936 
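        	/*
        	 * Allocate interrupts with the current settings; if the
        	 * per-type setup below fails, fall back from MSI-X to MSI,
        	 * and then to INTx.
        	 */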
   1937 alloc_retry:
   1938 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1939 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1940 		return;
   1941 	}
   1942 
   1943 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1944 		error = wm_setup_msix(sc);
   1945 		if (error) {
   1946 			pci_intr_release(pc, sc->sc_intrs,
   1947 			    counts[PCI_INTR_TYPE_MSIX]);
   1948 
   1949 			/* Setup for MSI: Disable MSI-X */
   1950 			max_type = PCI_INTR_TYPE_MSI;
   1951 			counts[PCI_INTR_TYPE_MSI] = 1;
   1952 			counts[PCI_INTR_TYPE_INTX] = 1;
   1953 			goto alloc_retry;
   1954 		}
   1955 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1956 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1957 		error = wm_setup_legacy(sc);
   1958 		if (error) {
   1959 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1960 			    counts[PCI_INTR_TYPE_MSI]);
   1961 
   1962 			/* The next try is for INTx: Disable MSI */
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_INTX] = 1;
   1965 			goto alloc_retry;
   1966 		}
   1967 	} else {
   1968 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1969 		error = wm_setup_legacy(sc);
   1970 		if (error) {
   1971 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1972 			    counts[PCI_INTR_TYPE_INTX]);
   1973 			return;
   1974 		}
   1975 	}
   1976 
   1977 	/*
   1978 	 * Check the function ID (unit number of the chip).
   1979 	 */
   1980 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1981 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1982 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1983 	    || (sc->sc_type == WM_T_82580)
   1984 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1985 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1986 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1987 	else
   1988 		sc->sc_funcid = 0;
   1989 
   1990 	/*
   1991 	 * Determine a few things about the bus we're connected to.
   1992 	 */
   1993 	if (sc->sc_type < WM_T_82543) {
   1994 		/* We don't really know the bus characteristics here. */
   1995 		sc->sc_bus_speed = 33;
   1996 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1997 		/*
   1998 		 * CSA (Communication Streaming Architecture) is about as fast
   1999 		 * as a 32-bit 66MHz PCI bus.
   2000 		 */
   2001 		sc->sc_flags |= WM_F_CSA;
   2002 		sc->sc_bus_speed = 66;
   2003 		aprint_verbose_dev(sc->sc_dev,
   2004 		    "Communication Streaming Architecture\n");
   2005 		if (sc->sc_type == WM_T_82547) {
   2006 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2007 			callout_setfunc(&sc->sc_txfifo_ch,
   2008 			    wm_82547_txfifo_stall, sc);
   2009 			aprint_verbose_dev(sc->sc_dev,
   2010 			    "using 82547 Tx FIFO stall work-around\n");
   2011 		}
   2012 	} else if (sc->sc_type >= WM_T_82571) {
   2013 		sc->sc_flags |= WM_F_PCIE;
   2014 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2015 		    && (sc->sc_type != WM_T_ICH10)
   2016 		    && (sc->sc_type != WM_T_PCH)
   2017 		    && (sc->sc_type != WM_T_PCH2)
   2018 		    && (sc->sc_type != WM_T_PCH_LPT)
   2019 		    && (sc->sc_type != WM_T_PCH_SPT)
   2020 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2021 			/* ICH* and PCH* have no PCIe capability registers */
   2022 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2023 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2024 				NULL) == 0)
   2025 				aprint_error_dev(sc->sc_dev,
   2026 				    "unable to find PCIe capability\n");
   2027 		}
   2028 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2029 	} else {
   2030 		reg = CSR_READ(sc, WMREG_STATUS);
   2031 		if (reg & STATUS_BUS64)
   2032 			sc->sc_flags |= WM_F_BUS64;
   2033 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2034 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2035 
   2036 			sc->sc_flags |= WM_F_PCIX;
   2037 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2038 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2039 				aprint_error_dev(sc->sc_dev,
   2040 				    "unable to find PCIX capability\n");
   2041 			else if (sc->sc_type != WM_T_82545_3 &&
   2042 				 sc->sc_type != WM_T_82546_3) {
   2043 				/*
   2044 				 * Work around a problem caused by the BIOS
   2045 				 * setting the max memory read byte count
   2046 				 * incorrectly.
   2047 				 */
   2048 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2049 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2050 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2051 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2052 
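        				/*
        				 * Both fields encode a count of
        				 * (512 << n) bytes; clamp the
        				 * commanded count to the device
        				 * maximum.
        				 */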
   2053 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2054 				    PCIX_CMD_BYTECNT_SHIFT;
   2055 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2056 				    PCIX_STATUS_MAXB_SHIFT;
   2057 				if (bytecnt > maxb) {
   2058 					aprint_verbose_dev(sc->sc_dev,
   2059 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2060 					    512 << bytecnt, 512 << maxb);
   2061 					pcix_cmd = (pcix_cmd &
   2062 					    ~PCIX_CMD_BYTECNT_MASK) |
   2063 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2064 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2065 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2066 					    pcix_cmd);
   2067 				}
   2068 			}
   2069 		}
   2070 		/*
   2071 		 * The quad port adapter is special; it has a PCIX-PCIX
   2072 		 * bridge on the board, and can run the secondary bus at
   2073 		 * a higher speed.
   2074 		 */
   2075 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2076 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2077 								      : 66;
   2078 		} else if (sc->sc_flags & WM_F_PCIX) {
   2079 			switch (reg & STATUS_PCIXSPD_MASK) {
   2080 			case STATUS_PCIXSPD_50_66:
   2081 				sc->sc_bus_speed = 66;
   2082 				break;
   2083 			case STATUS_PCIXSPD_66_100:
   2084 				sc->sc_bus_speed = 100;
   2085 				break;
   2086 			case STATUS_PCIXSPD_100_133:
   2087 				sc->sc_bus_speed = 133;
   2088 				break;
   2089 			default:
   2090 				aprint_error_dev(sc->sc_dev,
   2091 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2092 				    reg & STATUS_PCIXSPD_MASK);
   2093 				sc->sc_bus_speed = 66;
   2094 				break;
   2095 			}
   2096 		} else
   2097 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2098 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2099 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2100 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2101 	}
   2102 
   2103 	/* clear interesting stat counters */
   2104 	CSR_READ(sc, WMREG_COLC);
   2105 	CSR_READ(sc, WMREG_RXERRC);
   2106 
   2107 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2108 	    || (sc->sc_type >= WM_T_ICH8))
   2109 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2110 	if (sc->sc_type >= WM_T_ICH8)
   2111 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2112 
   2113 	/* Set PHY, NVM mutex related stuff */
   2114 	switch (sc->sc_type) {
   2115 	case WM_T_82542_2_0:
   2116 	case WM_T_82542_2_1:
   2117 	case WM_T_82543:
   2118 	case WM_T_82544:
   2119 		/* Microwire */
   2120 		sc->nvm.read = wm_nvm_read_uwire;
   2121 		sc->sc_nvm_wordsize = 64;
   2122 		sc->sc_nvm_addrbits = 6;
   2123 		break;
   2124 	case WM_T_82540:
   2125 	case WM_T_82545:
   2126 	case WM_T_82545_3:
   2127 	case WM_T_82546:
   2128 	case WM_T_82546_3:
   2129 		/* Microwire */
   2130 		sc->nvm.read = wm_nvm_read_uwire;
   2131 		reg = CSR_READ(sc, WMREG_EECD);
   2132 		if (reg & EECD_EE_SIZE) {
   2133 			sc->sc_nvm_wordsize = 256;
   2134 			sc->sc_nvm_addrbits = 8;
   2135 		} else {
   2136 			sc->sc_nvm_wordsize = 64;
   2137 			sc->sc_nvm_addrbits = 6;
   2138 		}
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		break;
   2143 	case WM_T_82541:
   2144 	case WM_T_82541_2:
   2145 	case WM_T_82547:
   2146 	case WM_T_82547_2:
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		/*
   2149 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
   2150 		 * 8254[17], so set the flags and functions before calling it.
   2151 		 */
   2152 		sc->sc_flags |= WM_F_LOCK_EECD;
   2153 		sc->nvm.acquire = wm_get_eecd;
   2154 		sc->nvm.release = wm_put_eecd;
   2155 		if (reg & EECD_EE_TYPE) {
   2156 			/* SPI */
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2159 			wm_nvm_set_addrbits_size_eecd(sc);
   2160 		} else {
   2161 			/* Microwire */
   2162 			sc->nvm.read = wm_nvm_read_uwire;
   2163 			if ((reg & EECD_EE_ABITS) != 0) {
   2164 				sc->sc_nvm_wordsize = 256;
   2165 				sc->sc_nvm_addrbits = 8;
   2166 			} else {
   2167 				sc->sc_nvm_wordsize = 64;
   2168 				sc->sc_nvm_addrbits = 6;
   2169 			}
   2170 		}
   2171 		break;
   2172 	case WM_T_82571:
   2173 	case WM_T_82572:
   2174 		/* SPI */
   2175 		sc->nvm.read = wm_nvm_read_eerd;
   2176 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2177 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 		wm_nvm_set_addrbits_size_eecd(sc);
   2179 		sc->phy.acquire = wm_get_swsm_semaphore;
   2180 		sc->phy.release = wm_put_swsm_semaphore;
   2181 		sc->nvm.acquire = wm_get_nvm_82571;
   2182 		sc->nvm.release = wm_put_nvm_82571;
   2183 		break;
   2184 	case WM_T_82573:
   2185 	case WM_T_82574:
   2186 	case WM_T_82583:
   2187 		sc->nvm.read = wm_nvm_read_eerd;
   2188 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2189 		if (sc->sc_type == WM_T_82573) {
   2190 			sc->phy.acquire = wm_get_swsm_semaphore;
   2191 			sc->phy.release = wm_put_swsm_semaphore;
   2192 			sc->nvm.acquire = wm_get_nvm_82571;
   2193 			sc->nvm.release = wm_put_nvm_82571;
   2194 		} else {
   2195 			/* Both PHY and NVM use the same semaphore. */
   2196 			sc->phy.acquire = sc->nvm.acquire
   2197 			    = wm_get_swfwhw_semaphore;
   2198 			sc->phy.release = sc->nvm.release
   2199 			    = wm_put_swfwhw_semaphore;
   2200 		}
   2201 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2202 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2203 			sc->sc_nvm_wordsize = 2048;
   2204 		} else {
   2205 			/* SPI */
   2206 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2207 			wm_nvm_set_addrbits_size_eecd(sc);
   2208 		}
   2209 		break;
   2210 	case WM_T_82575:
   2211 	case WM_T_82576:
   2212 	case WM_T_82580:
   2213 	case WM_T_I350:
   2214 	case WM_T_I354:
   2215 	case WM_T_80003:
   2216 		/* SPI */
   2217 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2218 		wm_nvm_set_addrbits_size_eecd(sc);
   2219 		if ((sc->sc_type == WM_T_80003)
   2220 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		} else {
   2224 			sc->nvm.read = wm_nvm_read_spi;
   2225 			sc->sc_flags |= WM_F_LOCK_EECD;
   2226 		}
   2227 		sc->phy.acquire = wm_get_phy_82575;
   2228 		sc->phy.release = wm_put_phy_82575;
   2229 		sc->nvm.acquire = wm_get_nvm_80003;
   2230 		sc->nvm.release = wm_put_nvm_80003;
   2231 		break;
   2232 	case WM_T_ICH8:
   2233 	case WM_T_ICH9:
   2234 	case WM_T_ICH10:
   2235 	case WM_T_PCH:
   2236 	case WM_T_PCH2:
   2237 	case WM_T_PCH_LPT:
   2238 		sc->nvm.read = wm_nvm_read_ich8;
   2239 		/* FLASH */
   2240 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2241 		sc->sc_nvm_wordsize = 2048;
   2242 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2243 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2244 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2245 			aprint_error_dev(sc->sc_dev,
   2246 			    "can't map FLASH registers\n");
   2247 			goto out;
   2248 		}
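        		/*
        		 * GFPREG gives the NVM region's base and limit in
        		 * flash-sector units; compute the base offset in bytes
        		 * and the per-bank size in 16-bit words (two banks).
        		 */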
   2249 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2250 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2251 		    ICH_FLASH_SECTOR_SIZE;
   2252 		sc->sc_ich8_flash_bank_size =
   2253 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2254 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2255 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2256 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2257 		sc->sc_flashreg_offset = 0;
   2258 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2259 		sc->phy.release = wm_put_swflag_ich8lan;
   2260 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2261 		sc->nvm.release = wm_put_nvm_ich8lan;
   2262 		break;
   2263 	case WM_T_PCH_SPT:
   2264 	case WM_T_PCH_CNP:
   2265 		sc->nvm.read = wm_nvm_read_spt;
   2266 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2267 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2268 		sc->sc_flasht = sc->sc_st;
   2269 		sc->sc_flashh = sc->sc_sh;
   2270 		sc->sc_ich8_flash_base = 0;
   2271 		sc->sc_nvm_wordsize =
   2272 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2273 		    * NVM_SIZE_MULTIPLIER;
   2274 		/* The value is a size in bytes; we want words */
   2275 		sc->sc_nvm_wordsize /= 2;
   2276 		/* assume 2 banks */
   2277 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2278 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2279 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2280 		sc->phy.release = wm_put_swflag_ich8lan;
   2281 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2282 		sc->nvm.release = wm_put_nvm_ich8lan;
   2283 		break;
   2284 	case WM_T_I210:
   2285 	case WM_T_I211:
   2286 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2287 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2288 		if (wm_nvm_flash_presence_i210(sc)) {
   2289 			sc->nvm.read = wm_nvm_read_eerd;
   2290 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2291 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2292 			wm_nvm_set_addrbits_size_eecd(sc);
   2293 		} else {
   2294 			sc->nvm.read = wm_nvm_read_invm;
   2295 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2296 			sc->sc_nvm_wordsize = INVM_SIZE;
   2297 		}
   2298 		sc->phy.acquire = wm_get_phy_82575;
   2299 		sc->phy.release = wm_put_phy_82575;
   2300 		sc->nvm.acquire = wm_get_nvm_80003;
   2301 		sc->nvm.release = wm_put_nvm_80003;
   2302 		break;
   2303 	default:
   2304 		break;
   2305 	}
   2306 
   2307 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2308 	switch (sc->sc_type) {
   2309 	case WM_T_82571:
   2310 	case WM_T_82572:
   2311 		reg = CSR_READ(sc, WMREG_SWSM2);
   2312 		if ((reg & SWSM2_LOCK) == 0) {
   2313 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2314 			force_clear_smbi = true;
   2315 		} else
   2316 			force_clear_smbi = false;
   2317 		break;
   2318 	case WM_T_82573:
   2319 	case WM_T_82574:
   2320 	case WM_T_82583:
   2321 		force_clear_smbi = true;
   2322 		break;
   2323 	default:
   2324 		force_clear_smbi = false;
   2325 		break;
   2326 	}
   2327 	if (force_clear_smbi) {
   2328 		reg = CSR_READ(sc, WMREG_SWSM);
   2329 		if ((reg & SWSM_SMBI) != 0)
   2330 			aprint_error_dev(sc->sc_dev,
   2331 			    "Please update the Bootagent\n");
   2332 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2333 	}
   2334 
   2335 	/*
   2336 	 * Defer printing the EEPROM type until after verifying the checksum.
   2337 	 * This allows the EEPROM type to be printed correctly in the case
   2338 	 * that no EEPROM is attached.
   2339 	 */
   2340 	/*
   2341 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2342 	 * this for later, so we can fail future reads from the EEPROM.
   2343 	 */
   2344 	if (wm_nvm_validate_checksum(sc)) {
   2345 		/*
   2346 		 * Check again, because some PCI-e parts fail the
   2347 		 * first check due to the link being in a sleep state.
   2348 		 */
   2349 		if (wm_nvm_validate_checksum(sc))
   2350 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2351 	}
   2352 
   2353 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2354 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2355 	else {
   2356 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2357 		    sc->sc_nvm_wordsize);
   2358 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2359 			aprint_verbose("iNVM");
   2360 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2361 			aprint_verbose("FLASH(HW)");
   2362 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2363 			aprint_verbose("FLASH");
   2364 		else {
   2365 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2366 				eetype = "SPI";
   2367 			else
   2368 				eetype = "MicroWire";
   2369 			aprint_verbose("(%d address bits) %s EEPROM",
   2370 			    sc->sc_nvm_addrbits, eetype);
   2371 		}
   2372 	}
   2373 	wm_nvm_version(sc);
   2374 	aprint_verbose("\n");
   2375 
   2376 	/*
   2377 	 * XXX This is the first call of wm_gmii_setup_phytype(). The
   2378 	 * result might be incorrect.
   2379 	 */
   2380 	wm_gmii_setup_phytype(sc, 0, 0);
   2381 
   2382 	/* check for WM_F_WOL on some chips before wm_reset() */
   2383 	switch (sc->sc_type) {
   2384 	case WM_T_ICH8:
   2385 	case WM_T_ICH9:
   2386 	case WM_T_ICH10:
   2387 	case WM_T_PCH:
   2388 	case WM_T_PCH2:
   2389 	case WM_T_PCH_LPT:
   2390 	case WM_T_PCH_SPT:
   2391 	case WM_T_PCH_CNP:
   2392 		apme_mask = WUC_APME;
   2393 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2394 		break;
   2395 	default:
   2396 		break;
   2397 	}
   2398 	/* Reset the chip to a known state. */
   2399 	wm_reset(sc);
   2400 
   2401 	/*
   2402 	 * Check for I21[01] PLL workaround.
   2403 	 *
   2404 	 * Three cases:
   2405 	 * a) Chip is I211.
   2406 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2407 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2408 	 */
   2409 	if (sc->sc_type == WM_T_I211)
   2410 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2411 	if (sc->sc_type == WM_T_I210) {
   2412 		if (!wm_nvm_flash_presence_i210(sc))
   2413 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2414 		else if ((sc->sc_nvm_ver_major < 3)
   2415 		    || ((sc->sc_nvm_ver_major == 3)
   2416 			&& (sc->sc_nvm_ver_minor < 25))) {
   2417 			aprint_verbose_dev(sc->sc_dev,
   2418 			    "ROM image version %d.%d is older than 3.25\n",
   2419 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2420 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2421 		}
   2422 	}
   2423 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2424 		wm_pll_workaround_i210(sc);
   2425 
   2426 	wm_get_wakeup(sc);
   2427 
   2428 	/* Non-AMT based hardware can now take control from firmware */
   2429 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2430 		wm_get_hw_control(sc);
   2431 
   2432 	/*
   2433 	 * Read the Ethernet address from the EEPROM, unless it was
   2434 	 * already found in the device properties.
   2435 	 */
   2436 	ea = prop_dictionary_get(dict, "mac-address");
   2437 	if (ea != NULL) {
   2438 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2439 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2440 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2441 	} else {
   2442 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2443 			aprint_error_dev(sc->sc_dev,
   2444 			    "unable to read Ethernet address\n");
   2445 			goto out;
   2446 		}
   2447 	}
   2448 
   2449 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2450 	    ether_sprintf(enaddr));
   2451 
   2452 	/*
   2453 	 * Read the config info from the EEPROM, and set up various
   2454 	 * bits in the control registers based on their contents.
   2455 	 */
   2456 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2457 	if (pn != NULL) {
   2458 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2459 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2460 	} else {
   2461 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2462 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2463 			goto out;
   2464 		}
   2465 	}
   2466 
   2467 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2468 	if (pn != NULL) {
   2469 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2470 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2471 	} else {
   2472 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2473 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2474 			goto out;
   2475 		}
   2476 	}
   2477 
   2478 	/* check for WM_F_WOL */
   2479 	switch (sc->sc_type) {
   2480 	case WM_T_82542_2_0:
   2481 	case WM_T_82542_2_1:
   2482 	case WM_T_82543:
   2483 		/* dummy? */
   2484 		eeprom_data = 0;
   2485 		apme_mask = NVM_CFG3_APME;
   2486 		break;
   2487 	case WM_T_82544:
   2488 		apme_mask = NVM_CFG2_82544_APM_EN;
   2489 		eeprom_data = cfg2;
   2490 		break;
   2491 	case WM_T_82546:
   2492 	case WM_T_82546_3:
   2493 	case WM_T_82571:
   2494 	case WM_T_82572:
   2495 	case WM_T_82573:
   2496 	case WM_T_82574:
   2497 	case WM_T_82583:
   2498 	case WM_T_80003:
   2499 	case WM_T_82575:
   2500 	case WM_T_82576:
   2501 		apme_mask = NVM_CFG3_APME;
   2502 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2503 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2504 		break;
   2505 	case WM_T_82580:
   2506 	case WM_T_I350:
   2507 	case WM_T_I354:
   2508 	case WM_T_I210:
   2509 	case WM_T_I211:
   2510 		apme_mask = NVM_CFG3_APME;
   2511 		wm_nvm_read(sc,
   2512 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2513 		    1, &eeprom_data);
   2514 		break;
   2515 	case WM_T_ICH8:
   2516 	case WM_T_ICH9:
   2517 	case WM_T_ICH10:
   2518 	case WM_T_PCH:
   2519 	case WM_T_PCH2:
   2520 	case WM_T_PCH_LPT:
   2521 	case WM_T_PCH_SPT:
   2522 	case WM_T_PCH_CNP:
   2523 		/* Already checked before wm_reset() */
   2524 		apme_mask = eeprom_data = 0;
   2525 		break;
   2526 	default: /* XXX 82540 */
   2527 		apme_mask = NVM_CFG3_APME;
   2528 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2529 		break;
   2530 	}
   2531 
   2532 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2533 	if ((eeprom_data & apme_mask) != 0)
   2534 		sc->sc_flags |= WM_F_WOL;
   2535 
   2536 	/*
   2537 	 * We have the EEPROM settings; now apply the special cases
   2538 	 * where the EEPROM may be wrong or the board won't support
   2539 	 * wake on LAN on a particular port.
   2540 	 */
   2541 	switch (sc->sc_pcidevid) {
   2542 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2543 		sc->sc_flags &= ~WM_F_WOL;
   2544 		break;
   2545 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2546 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2547 		/* Wake events only supported on port A for dual fiber
   2548 		 * regardless of eeprom setting */
   2549 		if (sc->sc_funcid == 1)
   2550 			sc->sc_flags &= ~WM_F_WOL;
   2551 		break;
   2552 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2553 		/* if quad port adapter, disable WoL on all but port A */
   2554 		if (sc->sc_funcid != 0)
   2555 			sc->sc_flags &= ~WM_F_WOL;
   2556 		break;
   2557 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2558 		/* Wake events only supported on port A for dual fiber
   2559 		 * regardless of eeprom setting */
   2560 		if (sc->sc_funcid == 1)
   2561 			sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2564 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2565 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2566 		/* if quad port adapter, disable WoL on all but port A */
   2567 		if (sc->sc_funcid != 0)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	}
   2571 
   2572 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2573 		/* Check NVM for autonegotiation */
   2574 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2575 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2576 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2577 		}
   2578 	}
   2579 
   2580 	/*
   2581 	 * XXX need special handling for some multiple-port cards
   2582 	 * to disable a particular port.
   2583 	 */
   2584 
   2585 	if (sc->sc_type >= WM_T_82544) {
   2586 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2587 		if (pn != NULL) {
   2588 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2589 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2590 		} else {
   2591 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2592 				aprint_error_dev(sc->sc_dev,
   2593 				    "unable to read SWDPIN\n");
   2594 				goto out;
   2595 			}
   2596 		}
   2597 	}
   2598 
   2599 	if (cfg1 & NVM_CFG1_ILOS)
   2600 		sc->sc_ctrl |= CTRL_ILOS;
   2601 
   2602 	/*
   2603 	 * XXX
   2604 	 * This code isn't correct because pins 2 and 3 are located
   2605 	 * in different positions on newer chips. Check all the datasheets.
   2606 	 *
   2607 	 * Until this is resolved, only do it for 82580 and older chips.
   2608 	 */
   2609 	if (sc->sc_type <= WM_T_82580) {
   2610 		if (sc->sc_type >= WM_T_82544) {
   2611 			sc->sc_ctrl |=
   2612 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2613 			    CTRL_SWDPIO_SHIFT;
   2614 			sc->sc_ctrl |=
   2615 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2616 			    CTRL_SWDPINS_SHIFT;
   2617 		} else {
   2618 			sc->sc_ctrl |=
   2619 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2620 			    CTRL_SWDPIO_SHIFT;
   2621 		}
   2622 	}
   2623 
   2624 	/* XXX Should this also be done for chips other than the 82580? */
   2625 	if (sc->sc_type == WM_T_82580) {
   2626 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2627 		if (nvmword & __BIT(13))
   2628 			sc->sc_ctrl |= CTRL_ILOS;
   2629 	}
   2630 
   2631 #if 0
   2632 	if (sc->sc_type >= WM_T_82544) {
   2633 		if (cfg1 & NVM_CFG1_IPS0)
   2634 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2635 		if (cfg1 & NVM_CFG1_IPS1)
   2636 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2637 		sc->sc_ctrl_ext |=
   2638 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2639 		    CTRL_EXT_SWDPIO_SHIFT;
   2640 		sc->sc_ctrl_ext |=
   2641 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2642 		    CTRL_EXT_SWDPINS_SHIFT;
   2643 	} else {
   2644 		sc->sc_ctrl_ext |=
   2645 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2646 		    CTRL_EXT_SWDPIO_SHIFT;
   2647 	}
   2648 #endif
   2649 
   2650 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2651 #if 0
   2652 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2653 #endif
   2654 
   2655 	if (sc->sc_type == WM_T_PCH) {
   2656 		uint16_t val;
   2657 
   2658 		/* Save the NVM K1 bit setting */
   2659 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2660 
   2661 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2662 			sc->sc_nvm_k1_enabled = 1;
   2663 		else
   2664 			sc->sc_nvm_k1_enabled = 0;
   2665 	}
   2666 
   2667 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2668 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2669 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2670 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2671 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2672 	    || sc->sc_type == WM_T_82573
   2673 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2674 		/* Copper only */
   2675 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2676 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
   2677 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
   2678 	    || (sc->sc_type == WM_T_I211)) {
   2679 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2680 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2681 		switch (link_mode) {
   2682 		case CTRL_EXT_LINK_MODE_1000KX:
   2683 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2684 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2685 			break;
   2686 		case CTRL_EXT_LINK_MODE_SGMII:
   2687 			if (wm_sgmii_uses_mdio(sc)) {
   2688 				aprint_verbose_dev(sc->sc_dev,
   2689 				    "SGMII(MDIO)\n");
   2690 				sc->sc_flags |= WM_F_SGMII;
   2691 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2692 				break;
   2693 			}
   2694 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2695 			/*FALLTHROUGH*/
   2696 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2697 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2698 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2699 				if (link_mode
   2700 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2701 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2702 					sc->sc_flags |= WM_F_SGMII;
   2703 				} else {
   2704 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2705 					aprint_verbose_dev(sc->sc_dev,
   2706 					    "SERDES\n");
   2707 				}
   2708 				break;
   2709 			}
   2710 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2711 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2712 
   2713 			/* Change current link mode setting */
   2714 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2715 			switch (sc->sc_mediatype) {
   2716 			case WM_MEDIATYPE_COPPER:
   2717 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2718 				break;
   2719 			case WM_MEDIATYPE_SERDES:
   2720 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2721 				break;
   2722 			default:
   2723 				break;
   2724 			}
   2725 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2726 			break;
   2727 		case CTRL_EXT_LINK_MODE_GMII:
   2728 		default:
   2729 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2730 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2731 			break;
   2732 		}
   2733 
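        		/* Enable the I2C interface only when SGMII media is in use. */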
   2734 		reg &= ~CTRL_EXT_I2C_ENA;
   2735 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2736 			reg |= CTRL_EXT_I2C_ENA;
   2739 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2740 	} else if (sc->sc_type < WM_T_82543 ||
   2741 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2742 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2743 			aprint_error_dev(sc->sc_dev,
   2744 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2745 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2746 		}
   2747 	} else {
   2748 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2749 			aprint_error_dev(sc->sc_dev,
   2750 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2751 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2752 		}
   2753 	}
   2754 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2755 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2756 
   2757 	/* Set device properties (macflags) */
   2758 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2759 
   2760 	/* Initialize the media structures accordingly. */
   2761 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2762 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2763 	else
   2764 		wm_tbi_mediainit(sc); /* All others */
   2765 
   2766 	ifp = &sc->sc_ethercom.ec_if;
   2767 	xname = device_xname(sc->sc_dev);
   2768 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2769 	ifp->if_softc = sc;
   2770 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2771 #ifdef WM_MPSAFE
   2772 	ifp->if_extflags = IFEF_MPSAFE;
   2773 #endif
   2774 	ifp->if_ioctl = wm_ioctl;
   2775 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2776 		ifp->if_start = wm_nq_start;
   2777 		/*
    2778 		 * When the number of CPUs is one and the controller can use
    2779 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2780 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2781 		 * other for link status changes.
   2782 		 * In this situation, wm_nq_transmit() is disadvantageous
   2783 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2784 		 */
   2785 		if (wm_is_using_multiqueue(sc))
   2786 			ifp->if_transmit = wm_nq_transmit;
   2787 	} else {
   2788 		ifp->if_start = wm_start;
   2789 		/*
    2790 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2791 		 */
   2792 		if (wm_is_using_multiqueue(sc))
   2793 			ifp->if_transmit = wm_transmit;
   2794 	}
    2795 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2796 	ifp->if_init = wm_init;
   2797 	ifp->if_stop = wm_stop;
   2798 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2799 	IFQ_SET_READY(&ifp->if_snd);
   2800 
   2801 	/* Check for jumbo frame */
   2802 	switch (sc->sc_type) {
   2803 	case WM_T_82573:
   2804 		/* XXX limited to 9234 if ASPM is disabled */
   2805 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2806 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2807 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2808 		break;
   2809 	case WM_T_82571:
   2810 	case WM_T_82572:
   2811 	case WM_T_82574:
   2812 	case WM_T_82583:
   2813 	case WM_T_82575:
   2814 	case WM_T_82576:
   2815 	case WM_T_82580:
   2816 	case WM_T_I350:
   2817 	case WM_T_I354:
   2818 	case WM_T_I210:
   2819 	case WM_T_I211:
   2820 	case WM_T_80003:
   2821 	case WM_T_ICH9:
   2822 	case WM_T_ICH10:
   2823 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2824 	case WM_T_PCH_LPT:
   2825 	case WM_T_PCH_SPT:
   2826 	case WM_T_PCH_CNP:
   2827 		/* XXX limited to 9234 */
   2828 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2829 		break;
   2830 	case WM_T_PCH:
   2831 		/* XXX limited to 4096 */
   2832 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2833 		break;
   2834 	case WM_T_82542_2_0:
   2835 	case WM_T_82542_2_1:
   2836 	case WM_T_ICH8:
   2837 		/* No support for jumbo frame */
   2838 		break;
   2839 	default:
   2840 		/* ETHER_MAX_LEN_JUMBO */
   2841 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2842 		break;
   2843 	}
   2844 
    2845 	/* If we're an i82543 or greater, we can support VLANs. */
   2846 	if (sc->sc_type >= WM_T_82543)
   2847 		sc->sc_ethercom.ec_capabilities |=
   2848 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2849 
   2850 	/*
    2851 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2852 	 * on i82543 and later.
   2853 	 */
   2854 	if (sc->sc_type >= WM_T_82543) {
   2855 		ifp->if_capabilities |=
   2856 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2857 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2858 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2859 		    IFCAP_CSUM_TCPv6_Tx |
   2860 		    IFCAP_CSUM_UDPv6_Tx;
   2861 	}
   2862 
   2863 	/*
   2864 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2865 	 *
   2866 	 *	82541GI (8086:1076) ... no
   2867 	 *	82572EI (8086:10b9) ... yes
   2868 	 */
   2869 	if (sc->sc_type >= WM_T_82571) {
   2870 		ifp->if_capabilities |=
   2871 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2872 	}
   2873 
   2874 	/*
    2875 	 * If we're an i82544 or greater (except i82547), we can do
   2876 	 * TCP segmentation offload.
   2877 	 */
   2878 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2879 		ifp->if_capabilities |= IFCAP_TSOv4;
   2880 	}
   2881 
   2882 	if (sc->sc_type >= WM_T_82571) {
   2883 		ifp->if_capabilities |= IFCAP_TSOv6;
   2884 	}
   2885 
   2886 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2887 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2888 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2889 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2890 
   2891 #ifdef WM_MPSAFE
   2892 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2893 #else
   2894 	sc->sc_core_lock = NULL;
   2895 #endif
   2896 
   2897 	/* Attach the interface. */
   2898 	error = if_initialize(ifp);
   2899 	if (error != 0) {
   2900 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2901 		    error);
   2902 		return; /* Error */
   2903 	}
   2904 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2905 	ether_ifattach(ifp, enaddr);
   2906 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2907 	if_register(ifp);
   2908 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2909 	    RND_FLAG_DEFAULT);
   2910 
   2911 #ifdef WM_EVENT_COUNTERS
   2912 	/* Attach event counters. */
   2913 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2914 	    NULL, xname, "linkintr");
   2915 
   2916 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2917 	    NULL, xname, "tx_xoff");
   2918 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2919 	    NULL, xname, "tx_xon");
   2920 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2921 	    NULL, xname, "rx_xoff");
   2922 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2923 	    NULL, xname, "rx_xon");
   2924 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2925 	    NULL, xname, "rx_macctl");
   2926 #endif /* WM_EVENT_COUNTERS */
   2927 
   2928 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2929 		pmf_class_network_register(self, ifp);
   2930 	else
   2931 		aprint_error_dev(self, "couldn't establish power handler\n");
   2932 
   2933 	sc->sc_flags |= WM_F_ATTACHED;
   2934  out:
   2935 	return;
   2936 }
   2937 
   2938 /* The detach function (ca_detach) */
   2939 static int
   2940 wm_detach(device_t self, int flags __unused)
   2941 {
   2942 	struct wm_softc *sc = device_private(self);
   2943 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2944 	int i;
   2945 
   2946 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2947 		return 0;
   2948 
   2949 	/* Stop the interface. Callouts are stopped in it. */
   2950 	wm_stop(ifp, 1);
   2951 
   2952 	pmf_device_deregister(self);
   2953 
   2954 #ifdef WM_EVENT_COUNTERS
   2955 	evcnt_detach(&sc->sc_ev_linkintr);
   2956 
   2957 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2958 	evcnt_detach(&sc->sc_ev_tx_xon);
   2959 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2960 	evcnt_detach(&sc->sc_ev_rx_xon);
   2961 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2962 #endif /* WM_EVENT_COUNTERS */
   2963 
   2964 	/* Tell the firmware about the release */
   2965 	WM_CORE_LOCK(sc);
   2966 	wm_release_manageability(sc);
   2967 	wm_release_hw_control(sc);
   2968 	wm_enable_wakeup(sc);
   2969 	WM_CORE_UNLOCK(sc);
   2970 
   2971 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2972 
   2973 	/* Delete all remaining media. */
   2974 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2975 
   2976 	ether_ifdetach(ifp);
   2977 	if_detach(ifp);
   2978 	if_percpuq_destroy(sc->sc_ipq);
   2979 
   2980 	/* Unload RX dmamaps and free mbufs */
   2981 	for (i = 0; i < sc->sc_nqueues; i++) {
   2982 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2983 		mutex_enter(rxq->rxq_lock);
   2984 		wm_rxdrain(rxq);
   2985 		mutex_exit(rxq->rxq_lock);
   2986 	}
   2987 	/* Must unlock here */
   2988 
   2989 	/* Disestablish the interrupt handler */
   2990 	for (i = 0; i < sc->sc_nintrs; i++) {
   2991 		if (sc->sc_ihs[i] != NULL) {
   2992 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2993 			sc->sc_ihs[i] = NULL;
   2994 		}
   2995 	}
   2996 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2997 
   2998 	wm_free_txrx_queues(sc);
   2999 
   3000 	/* Unmap the registers */
   3001 	if (sc->sc_ss) {
   3002 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3003 		sc->sc_ss = 0;
   3004 	}
   3005 	if (sc->sc_ios) {
   3006 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3007 		sc->sc_ios = 0;
   3008 	}
   3009 	if (sc->sc_flashs) {
   3010 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3011 		sc->sc_flashs = 0;
   3012 	}
   3013 
   3014 	if (sc->sc_core_lock)
   3015 		mutex_obj_free(sc->sc_core_lock);
   3016 	if (sc->sc_ich_phymtx)
   3017 		mutex_obj_free(sc->sc_ich_phymtx);
   3018 	if (sc->sc_ich_nvmmtx)
   3019 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3020 
   3021 	return 0;
   3022 }
   3023 
   3024 static bool
   3025 wm_suspend(device_t self, const pmf_qual_t *qual)
   3026 {
   3027 	struct wm_softc *sc = device_private(self);
   3028 
   3029 	wm_release_manageability(sc);
   3030 	wm_release_hw_control(sc);
   3031 	wm_enable_wakeup(sc);
   3032 
   3033 	return true;
   3034 }
   3035 
   3036 static bool
   3037 wm_resume(device_t self, const pmf_qual_t *qual)
   3038 {
   3039 	struct wm_softc *sc = device_private(self);
   3040 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3041 	pcireg_t reg;
   3042 	char buf[256];
   3043 
   3044 	reg = CSR_READ(sc, WMREG_WUS);
   3045 	if (reg != 0) {
   3046 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3047 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3048 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3049 	}
   3050 
   3051 	if (sc->sc_type >= WM_T_PCH2)
   3052 		wm_resume_workarounds_pchlan(sc);
   3053 	if ((ifp->if_flags & IFF_UP) == 0) {
   3054 		wm_reset(sc);
   3055 		/* Non-AMT based hardware can now take control from firmware */
   3056 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3057 			wm_get_hw_control(sc);
   3058 		wm_init_manageability(sc);
   3059 	} else {
   3060 		/*
   3061 		 * We called pmf_class_network_register(), so if_init() is
    3062 		 * automatically called when IFF_UP is set. wm_reset(),
   3063 		 * wm_get_hw_control() and wm_init_manageability() are called
   3064 		 * via wm_init().
   3065 		 */
   3066 	}
   3067 
   3068 	return true;
   3069 }
   3070 
   3071 /*
   3072  * wm_watchdog:		[ifnet interface function]
   3073  *
   3074  *	Watchdog timer handler.
   3075  */
   3076 static void
   3077 wm_watchdog(struct ifnet *ifp)
   3078 {
   3079 	int qid;
   3080 	struct wm_softc *sc = ifp->if_softc;
    3081 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 16 (82576). */
   3082 
   3083 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3084 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3085 
   3086 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3087 	}
   3088 
   3089 	/*
    3090 	 * If any of the queues hung up, reset the interface.
   3091 	 */
   3092 	if (hang_queue != 0) {
   3093 		(void) wm_init(ifp);
   3094 
   3095 		/*
    3096 		 * There is still some upper layer processing which calls
    3097 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3098 		 */
   3099 		/* Try to get more packets going. */
   3100 		ifp->if_start(ifp);
   3101 	}
   3102 }
    3103 
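         /*
          * wm_watchdog_txq:
          *
          *	Per-queue watchdog check (descriptive note), called from
          *	wm_watchdog() for each Tx queue. If the queue has been stuck
          *	for longer than wm_watchdog_timeout seconds,
          *	wm_watchdog_txq_locked() sets the queue's bit in *hang.
          */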
   3105 static void
   3106 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3107 {
   3108 
   3109 	mutex_enter(txq->txq_lock);
   3110 	if (txq->txq_sending &&
   3111 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3112 		wm_watchdog_txq_locked(ifp, txq, hang);
   3113 	}
   3114 	mutex_exit(txq->txq_lock);
   3115 }
   3116 
   3117 static void
   3118 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3119     uint16_t *hang)
   3120 {
   3121 	struct wm_softc *sc = ifp->if_softc;
   3122 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3123 
   3124 	KASSERT(mutex_owned(txq->txq_lock));
   3125 
   3126 	/*
   3127 	 * Since we're using delayed interrupts, sweep up
   3128 	 * before we report an error.
   3129 	 */
   3130 	wm_txeof(txq, UINT_MAX);
   3131 
   3132 	if (txq->txq_sending)
   3133 		*hang |= __BIT(wmq->wmq_id);
   3134 
   3135 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3136 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3137 		    device_xname(sc->sc_dev));
   3138 	} else {
   3139 #ifdef WM_DEBUG
   3140 		int i, j;
   3141 		struct wm_txsoft *txs;
   3142 #endif
   3143 		log(LOG_ERR,
   3144 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3145 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3146 		    txq->txq_next);
   3147 		ifp->if_oerrors++;
   3148 #ifdef WM_DEBUG
   3149 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3150 		    i = WM_NEXTTXS(txq, i)) {
   3151 		    txs = &txq->txq_soft[i];
   3152 		    printf("txs %d tx %d -> %d\n",
   3153 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3154 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3155 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3156 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3157 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3158 				    printf("\t %#08x%08x\n",
   3159 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3160 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3161 			    } else {
   3162 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3163 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3164 					txq->txq_descs[j].wtx_addr.wa_low);
   3165 				    printf("\t %#04x%02x%02x%08x\n",
   3166 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3167 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3168 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3169 					txq->txq_descs[j].wtx_cmdlen);
   3170 			    }
   3171 			if (j == txs->txs_lastdesc)
   3172 				break;
   3173 			}
   3174 		}
   3175 #endif
   3176 	}
   3177 }
   3178 
   3179 /*
   3180  * wm_tick:
   3181  *
   3182  *	One second timer, used to check link status, sweep up
   3183  *	completed transmit jobs, etc.
   3184  */
   3185 static void
   3186 wm_tick(void *arg)
   3187 {
   3188 	struct wm_softc *sc = arg;
   3189 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3190 #ifndef WM_MPSAFE
   3191 	int s = splnet();
   3192 #endif
   3193 
   3194 	WM_CORE_LOCK(sc);
   3195 
   3196 	if (sc->sc_core_stopping) {
   3197 		WM_CORE_UNLOCK(sc);
   3198 #ifndef WM_MPSAFE
   3199 		splx(s);
   3200 #endif
   3201 		return;
   3202 	}
   3203 
   3204 	if (sc->sc_type >= WM_T_82542_2_1) {
   3205 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3206 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3207 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3208 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3209 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3210 	}
   3211 
   3212 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3213 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3214 	    + CSR_READ(sc, WMREG_CRCERRS)
   3215 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3216 	    + CSR_READ(sc, WMREG_SYMERRC)
   3217 	    + CSR_READ(sc, WMREG_RXERRC)
   3218 	    + CSR_READ(sc, WMREG_SEC)
   3219 	    + CSR_READ(sc, WMREG_CEXTERR)
   3220 	    + CSR_READ(sc, WMREG_RLEC);
   3221 	/*
    3222 	 * WMREG_RNBC is incremented when there are no available buffers in
    3223 	 * host memory. It does not represent the number of dropped packets,
    3224 	 * because the ethernet controller can still receive packets in that
    3225 	 * case if there is space in the PHY's FIFO.
    3226 	 *
    3227 	 * If you want to track WMREG_RNBC, you should use your own EVCNT
    3228 	 * instead of if_iqdrops.
   3229 	 */
   3230 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3231 
   3232 	if (sc->sc_flags & WM_F_HAS_MII)
   3233 		mii_tick(&sc->sc_mii);
   3234 	else if ((sc->sc_type >= WM_T_82575)
   3235 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3236 		wm_serdes_tick(sc);
   3237 	else
   3238 		wm_tbi_tick(sc);
   3239 
   3240 	WM_CORE_UNLOCK(sc);
   3241 
   3242 	wm_watchdog(ifp);
   3243 
   3244 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3245 }
   3246 
   3247 static int
   3248 wm_ifflags_cb(struct ethercom *ec)
   3249 {
   3250 	struct ifnet *ifp = &ec->ec_if;
   3251 	struct wm_softc *sc = ifp->if_softc;
   3252 	int rc = 0;
   3253 
   3254 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3255 		device_xname(sc->sc_dev), __func__));
   3256 
   3257 	WM_CORE_LOCK(sc);
   3258 
   3259 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3260 	sc->sc_if_flags = ifp->if_flags;
   3261 
   3262 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3263 		rc = ENETRESET;
   3264 		goto out;
   3265 	}
   3266 
   3267 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3268 		wm_set_filter(sc);
   3269 
   3270 	wm_set_vlan(sc);
   3271 
   3272 out:
   3273 	WM_CORE_UNLOCK(sc);
   3274 
   3275 	return rc;
   3276 }
   3277 
   3278 /*
   3279  * wm_ioctl:		[ifnet interface function]
   3280  *
   3281  *	Handle control requests from the operator.
   3282  */
   3283 static int
   3284 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3285 {
   3286 	struct wm_softc *sc = ifp->if_softc;
   3287 	struct ifreq *ifr = (struct ifreq *) data;
   3288 	struct ifaddr *ifa = (struct ifaddr *)data;
   3289 	struct sockaddr_dl *sdl;
   3290 	int s, error;
   3291 
   3292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3293 		device_xname(sc->sc_dev), __func__));
   3294 
   3295 #ifndef WM_MPSAFE
   3296 	s = splnet();
   3297 #endif
   3298 	switch (cmd) {
   3299 	case SIOCSIFMEDIA:
   3300 	case SIOCGIFMEDIA:
   3301 		WM_CORE_LOCK(sc);
   3302 		/* Flow control requires full-duplex mode. */
   3303 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3304 		    (ifr->ifr_media & IFM_FDX) == 0)
   3305 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3306 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3307 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3308 				/* We can do both TXPAUSE and RXPAUSE. */
   3309 				ifr->ifr_media |=
   3310 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3311 			}
   3312 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3313 		}
   3314 		WM_CORE_UNLOCK(sc);
   3315 #ifdef WM_MPSAFE
   3316 		s = splnet();
   3317 #endif
   3318 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3319 #ifdef WM_MPSAFE
   3320 		splx(s);
   3321 #endif
   3322 		break;
   3323 	case SIOCINITIFADDR:
   3324 		WM_CORE_LOCK(sc);
   3325 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3326 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3327 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3328 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3329 			/* unicast address is first multicast entry */
   3330 			wm_set_filter(sc);
   3331 			error = 0;
   3332 			WM_CORE_UNLOCK(sc);
   3333 			break;
   3334 		}
   3335 		WM_CORE_UNLOCK(sc);
   3336 		/*FALLTHROUGH*/
   3337 	default:
   3338 #ifdef WM_MPSAFE
   3339 		s = splnet();
   3340 #endif
   3341 		/* It may call wm_start, so unlock here */
   3342 		error = ether_ioctl(ifp, cmd, data);
   3343 #ifdef WM_MPSAFE
   3344 		splx(s);
   3345 #endif
   3346 		if (error != ENETRESET)
   3347 			break;
   3348 
   3349 		error = 0;
   3350 
   3351 		if (cmd == SIOCSIFCAP)
   3352 			error = (*ifp->if_init)(ifp);
   3353 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3354 			;
   3355 		else if (ifp->if_flags & IFF_RUNNING) {
   3356 			/*
   3357 			 * Multicast list has changed; set the hardware filter
   3358 			 * accordingly.
   3359 			 */
   3360 			WM_CORE_LOCK(sc);
   3361 			wm_set_filter(sc);
   3362 			WM_CORE_UNLOCK(sc);
   3363 		}
   3364 		break;
   3365 	}
   3366 
   3367 #ifndef WM_MPSAFE
   3368 	splx(s);
   3369 #endif
   3370 	return error;
   3371 }
   3372 
   3373 /* MAC address related */
   3374 
   3375 /*
    3376  * Get the offset of the MAC address and return it.
    3377  * If an error occurred, use offset 0.
   3378  */
   3379 static uint16_t
   3380 wm_check_alt_mac_addr(struct wm_softc *sc)
   3381 {
   3382 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3383 	uint16_t offset = NVM_OFF_MACADDR;
   3384 
   3385 	/* Try to read alternative MAC address pointer */
   3386 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3387 		return 0;
   3388 
   3389 	/* Check pointer if it's valid or not. */
   3390 	if ((offset == 0x0000) || (offset == 0xffff))
   3391 		return 0;
   3392 
   3393 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3394 	/*
    3395 	 * Check whether the alternative MAC address is valid or not.
    3396 	 * Some cards have a non-0xffff pointer but don't actually use
    3397 	 * an alternative MAC address.
   3398 	 *
   3399 	 * Check whether the broadcast bit is set or not.
   3400 	 */
   3401 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3402 		if (((myea[0] & 0xff) & 0x01) == 0)
   3403 			return offset; /* Found */
   3404 
   3405 	/* Not found */
   3406 	return 0;
   3407 }
   3408 
   3409 static int
   3410 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3411 {
   3412 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3413 	uint16_t offset = NVM_OFF_MACADDR;
   3414 	int do_invert = 0;
   3415 
   3416 	switch (sc->sc_type) {
   3417 	case WM_T_82580:
   3418 	case WM_T_I350:
   3419 	case WM_T_I354:
   3420 		/* EEPROM Top Level Partitioning */
   3421 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3422 		break;
   3423 	case WM_T_82571:
   3424 	case WM_T_82575:
   3425 	case WM_T_82576:
   3426 	case WM_T_80003:
   3427 	case WM_T_I210:
   3428 	case WM_T_I211:
   3429 		offset = wm_check_alt_mac_addr(sc);
   3430 		if (offset == 0)
   3431 			if ((sc->sc_funcid & 0x01) == 1)
   3432 				do_invert = 1;
   3433 		break;
   3434 	default:
   3435 		if ((sc->sc_funcid & 0x01) == 1)
   3436 			do_invert = 1;
   3437 		break;
   3438 	}
   3439 
   3440 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3441 		goto bad;
   3442 
   3443 	enaddr[0] = myea[0] & 0xff;
   3444 	enaddr[1] = myea[0] >> 8;
   3445 	enaddr[2] = myea[1] & 0xff;
   3446 	enaddr[3] = myea[1] >> 8;
   3447 	enaddr[4] = myea[2] & 0xff;
   3448 	enaddr[5] = myea[2] >> 8;
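         	/*
         	 * Illustrative example: the NVM words hold the address in
         	 * little-endian byte order, so myea[] = { 0x1100, 0x3322,
         	 * 0x5544 } unpacks to 00:11:22:33:44:55 above.
         	 */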
   3449 
   3450 	/*
   3451 	 * Toggle the LSB of the MAC address on the second port
   3452 	 * of some dual port cards.
   3453 	 */
   3454 	if (do_invert != 0)
   3455 		enaddr[5] ^= 1;
   3456 
   3457 	return 0;
   3458 
   3459  bad:
   3460 	return -1;
   3461 }
   3462 
   3463 /*
   3464  * wm_set_ral:
   3465  *
    3466  *	Set an entry in the receive address list.
   3467  */
   3468 static void
   3469 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3470 {
   3471 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3472 	uint32_t wlock_mac;
   3473 	int rv;
   3474 
   3475 	if (enaddr != NULL) {
   3476 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3477 		    (enaddr[3] << 24);
   3478 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3479 		ral_hi |= RAL_AV;
   3480 	} else {
   3481 		ral_lo = 0;
   3482 		ral_hi = 0;
   3483 	}
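         	/*
         	 * Illustrative layout: for 00:11:22:33:44:55 the above yields
         	 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544, i.e. the
         	 * address is stored little-endian across the RAL/RAH pair with
         	 * the Address Valid bit in the high word.
         	 */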
   3484 
   3485 	switch (sc->sc_type) {
   3486 	case WM_T_82542_2_0:
   3487 	case WM_T_82542_2_1:
   3488 	case WM_T_82543:
   3489 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3490 		CSR_WRITE_FLUSH(sc);
   3491 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3492 		CSR_WRITE_FLUSH(sc);
   3493 		break;
   3494 	case WM_T_PCH2:
   3495 	case WM_T_PCH_LPT:
   3496 	case WM_T_PCH_SPT:
   3497 	case WM_T_PCH_CNP:
   3498 		if (idx == 0) {
   3499 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3500 			CSR_WRITE_FLUSH(sc);
   3501 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3502 			CSR_WRITE_FLUSH(sc);
   3503 			return;
   3504 		}
   3505 		if (sc->sc_type != WM_T_PCH2) {
   3506 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3507 			    FWSM_WLOCK_MAC);
   3508 			addrl = WMREG_SHRAL(idx - 1);
   3509 			addrh = WMREG_SHRAH(idx - 1);
   3510 		} else {
   3511 			wlock_mac = 0;
   3512 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3513 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3514 		}
   3515 
   3516 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3517 			rv = wm_get_swflag_ich8lan(sc);
   3518 			if (rv != 0)
   3519 				return;
   3520 			CSR_WRITE(sc, addrl, ral_lo);
   3521 			CSR_WRITE_FLUSH(sc);
   3522 			CSR_WRITE(sc, addrh, ral_hi);
   3523 			CSR_WRITE_FLUSH(sc);
   3524 			wm_put_swflag_ich8lan(sc);
   3525 		}
   3526 
   3527 		break;
   3528 	default:
   3529 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3530 		CSR_WRITE_FLUSH(sc);
   3531 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3532 		CSR_WRITE_FLUSH(sc);
   3533 		break;
   3534 	}
   3535 }
   3536 
   3537 /*
   3538  * wm_mchash:
   3539  *
   3540  *	Compute the hash of the multicast address for the 4096-bit
    3541  *	multicast filter (1024-bit on the ICH/PCH variants).
   3542  */
   3543 static uint32_t
   3544 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3545 {
   3546 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3547 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3548 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3549 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3550 	uint32_t hash;
   3551 
   3552 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3553 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3554 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    3555 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3556 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3557 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3558 		return (hash & 0x3ff);
   3559 	}
   3560 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3561 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3562 
   3563 	return (hash & 0xfff);
   3564 }
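         /*
          * Illustrative example: for the multicast address 01:00:5e:00:00:01
          * with sc_mchash_type 0 on a non-ICH/PCH chip, enaddr[4] = 0x00 and
          * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
          * wm_set_filter() then uses bits [11:5] as the MTA register index
          * (0) and bits [4:0] as the bit index within that register (16).
          */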
   3565 
   3566 /*
   3567  * wm_set_filter:
   3568  *
   3569  *	Set up the receive filter.
   3570  */
   3571 static void
   3572 wm_set_filter(struct wm_softc *sc)
   3573 {
   3574 	struct ethercom *ec = &sc->sc_ethercom;
   3575 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3576 	struct ether_multi *enm;
   3577 	struct ether_multistep step;
   3578 	bus_addr_t mta_reg;
   3579 	uint32_t hash, reg, bit;
   3580 	int i, size, ralmax;
   3581 
   3582 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3583 		device_xname(sc->sc_dev), __func__));
   3584 
   3585 	if (sc->sc_type >= WM_T_82544)
   3586 		mta_reg = WMREG_CORDOVA_MTA;
   3587 	else
   3588 		mta_reg = WMREG_MTA;
   3589 
   3590 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3591 
   3592 	if (ifp->if_flags & IFF_BROADCAST)
   3593 		sc->sc_rctl |= RCTL_BAM;
   3594 	if (ifp->if_flags & IFF_PROMISC) {
   3595 		sc->sc_rctl |= RCTL_UPE;
   3596 		goto allmulti;
   3597 	}
   3598 
   3599 	/*
   3600 	 * Set the station address in the first RAL slot, and
   3601 	 * clear the remaining slots.
   3602 	 */
   3603 	if (sc->sc_type == WM_T_ICH8)
    3604 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3605 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3606 	    || (sc->sc_type == WM_T_PCH))
   3607 		size = WM_RAL_TABSIZE_ICH8;
   3608 	else if (sc->sc_type == WM_T_PCH2)
   3609 		size = WM_RAL_TABSIZE_PCH2;
   3610 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3611 	    || (sc->sc_type == WM_T_PCH_CNP))
   3612 		size = WM_RAL_TABSIZE_PCH_LPT;
   3613 	else if (sc->sc_type == WM_T_82575)
   3614 		size = WM_RAL_TABSIZE_82575;
   3615 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3616 		size = WM_RAL_TABSIZE_82576;
   3617 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3618 		size = WM_RAL_TABSIZE_I350;
   3619 	else
   3620 		size = WM_RAL_TABSIZE;
   3621 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3622 
   3623 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3624 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3625 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3626 		switch (i) {
   3627 		case 0:
   3628 			/* We can use all entries */
   3629 			ralmax = size;
   3630 			break;
   3631 		case 1:
   3632 			/* Only RAR[0] */
   3633 			ralmax = 1;
   3634 			break;
   3635 		default:
   3636 			/* available SHRA + RAR[0] */
   3637 			ralmax = i + 1;
   3638 		}
   3639 	} else
   3640 		ralmax = size;
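         	/*
         	 * Illustrative note: when FWSM_WLOCK_MAC is, say, 3, RAR[0]
         	 * and the first SHRA entries up to that index stay writable
         	 * (ralmax = 4), so the loop below clears only the slots with
         	 * i < ralmax and leaves the firmware-locked entries untouched.
         	 */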
   3641 	for (i = 1; i < size; i++) {
   3642 		if (i < ralmax)
   3643 			wm_set_ral(sc, NULL, i);
   3644 	}
   3645 
   3646 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3647 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3648 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3649 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3650 		size = WM_ICH8_MC_TABSIZE;
   3651 	else
   3652 		size = WM_MC_TABSIZE;
   3653 	/* Clear out the multicast table. */
   3654 	for (i = 0; i < size; i++) {
   3655 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3656 		CSR_WRITE_FLUSH(sc);
   3657 	}
   3658 
   3659 	ETHER_LOCK(ec);
   3660 	ETHER_FIRST_MULTI(step, ec, enm);
   3661 	while (enm != NULL) {
   3662 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3663 			ETHER_UNLOCK(ec);
   3664 			/*
   3665 			 * We must listen to a range of multicast addresses.
   3666 			 * For now, just accept all multicasts, rather than
   3667 			 * trying to set only those filter bits needed to match
   3668 			 * the range.  (At this time, the only use of address
   3669 			 * ranges is for IP multicast routing, for which the
   3670 			 * range is big enough to require all bits set.)
   3671 			 */
   3672 			goto allmulti;
   3673 		}
   3674 
   3675 		hash = wm_mchash(sc, enm->enm_addrlo);
   3676 
   3677 		reg = (hash >> 5);
   3678 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3679 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3680 		    || (sc->sc_type == WM_T_PCH2)
   3681 		    || (sc->sc_type == WM_T_PCH_LPT)
   3682 		    || (sc->sc_type == WM_T_PCH_SPT)
   3683 		    || (sc->sc_type == WM_T_PCH_CNP))
   3684 			reg &= 0x1f;
   3685 		else
   3686 			reg &= 0x7f;
   3687 		bit = hash & 0x1f;
   3688 
   3689 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3690 		hash |= 1U << bit;
   3691 
   3692 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3693 			/*
    3694 			 * 82544 Errata 9: Certain registers cannot be written
   3695 			 * with particular alignments in PCI-X bus operation
   3696 			 * (FCAH, MTA and VFTA).
   3697 			 */
   3698 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3699 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3700 			CSR_WRITE_FLUSH(sc);
   3701 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3702 			CSR_WRITE_FLUSH(sc);
   3703 		} else {
   3704 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3705 			CSR_WRITE_FLUSH(sc);
   3706 		}
   3707 
   3708 		ETHER_NEXT_MULTI(step, enm);
   3709 	}
   3710 	ETHER_UNLOCK(ec);
   3711 
   3712 	ifp->if_flags &= ~IFF_ALLMULTI;
   3713 	goto setit;
   3714 
   3715  allmulti:
   3716 	ifp->if_flags |= IFF_ALLMULTI;
   3717 	sc->sc_rctl |= RCTL_MPE;
   3718 
   3719  setit:
   3720 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3721 }
   3722 
   3723 /* Reset and init related */
   3724 
   3725 static void
   3726 wm_set_vlan(struct wm_softc *sc)
   3727 {
   3728 
   3729 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3730 		device_xname(sc->sc_dev), __func__));
   3731 
   3732 	/* Deal with VLAN enables. */
   3733 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3734 		sc->sc_ctrl |= CTRL_VME;
   3735 	else
   3736 		sc->sc_ctrl &= ~CTRL_VME;
   3737 
   3738 	/* Write the control registers. */
   3739 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3740 }
   3741 
   3742 static void
   3743 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3744 {
   3745 	uint32_t gcr;
   3746 	pcireg_t ctrl2;
   3747 
   3748 	gcr = CSR_READ(sc, WMREG_GCR);
   3749 
   3750 	/* Only take action if timeout value is defaulted to 0 */
   3751 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3752 		goto out;
   3753 
   3754 	if ((gcr & GCR_CAP_VER2) == 0) {
   3755 		gcr |= GCR_CMPL_TMOUT_10MS;
   3756 		goto out;
   3757 	}
   3758 
   3759 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3760 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3761 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3762 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3763 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
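         	/*
         	 * Descriptive note: pre-v2 devices encode the completion
         	 * timeout in GCR (10ms above), while v2-capable devices
         	 * program it through the standard PCIe Device Control 2
         	 * register instead (16ms here).
         	 */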
   3764 
   3765 out:
   3766 	/* Disable completion timeout resend */
   3767 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3768 
   3769 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3770 }
   3771 
   3772 void
   3773 wm_get_auto_rd_done(struct wm_softc *sc)
   3774 {
   3775 	int i;
   3776 
    3777 	/* Wait for eeprom to reload */
   3778 	switch (sc->sc_type) {
   3779 	case WM_T_82571:
   3780 	case WM_T_82572:
   3781 	case WM_T_82573:
   3782 	case WM_T_82574:
   3783 	case WM_T_82583:
   3784 	case WM_T_82575:
   3785 	case WM_T_82576:
   3786 	case WM_T_82580:
   3787 	case WM_T_I350:
   3788 	case WM_T_I354:
   3789 	case WM_T_I210:
   3790 	case WM_T_I211:
   3791 	case WM_T_80003:
   3792 	case WM_T_ICH8:
   3793 	case WM_T_ICH9:
   3794 		for (i = 0; i < 10; i++) {
   3795 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3796 				break;
   3797 			delay(1000);
   3798 		}
   3799 		if (i == 10) {
   3800 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3801 			    "complete\n", device_xname(sc->sc_dev));
   3802 		}
   3803 		break;
   3804 	default:
   3805 		break;
   3806 	}
   3807 }
   3808 
   3809 void
   3810 wm_lan_init_done(struct wm_softc *sc)
   3811 {
   3812 	uint32_t reg = 0;
   3813 	int i;
   3814 
   3815 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3816 		device_xname(sc->sc_dev), __func__));
   3817 
   3818 	/* Wait for eeprom to reload */
   3819 	switch (sc->sc_type) {
   3820 	case WM_T_ICH10:
   3821 	case WM_T_PCH:
   3822 	case WM_T_PCH2:
   3823 	case WM_T_PCH_LPT:
   3824 	case WM_T_PCH_SPT:
   3825 	case WM_T_PCH_CNP:
   3826 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3827 			reg = CSR_READ(sc, WMREG_STATUS);
   3828 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3829 				break;
   3830 			delay(100);
   3831 		}
   3832 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3833 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3834 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3835 		}
   3836 		break;
   3837 	default:
   3838 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3839 		    __func__);
   3840 		break;
   3841 	}
   3842 
   3843 	reg &= ~STATUS_LAN_INIT_DONE;
   3844 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3845 }
   3846 
   3847 void
   3848 wm_get_cfg_done(struct wm_softc *sc)
   3849 {
   3850 	int mask;
   3851 	uint32_t reg;
   3852 	int i;
   3853 
   3854 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3855 		device_xname(sc->sc_dev), __func__));
   3856 
   3857 	/* Wait for eeprom to reload */
   3858 	switch (sc->sc_type) {
   3859 	case WM_T_82542_2_0:
   3860 	case WM_T_82542_2_1:
   3861 		/* null */
   3862 		break;
   3863 	case WM_T_82543:
   3864 	case WM_T_82544:
   3865 	case WM_T_82540:
   3866 	case WM_T_82545:
   3867 	case WM_T_82545_3:
   3868 	case WM_T_82546:
   3869 	case WM_T_82546_3:
   3870 	case WM_T_82541:
   3871 	case WM_T_82541_2:
   3872 	case WM_T_82547:
   3873 	case WM_T_82547_2:
   3874 	case WM_T_82573:
   3875 	case WM_T_82574:
   3876 	case WM_T_82583:
   3877 		/* generic */
   3878 		delay(10*1000);
   3879 		break;
   3880 	case WM_T_80003:
   3881 	case WM_T_82571:
   3882 	case WM_T_82572:
   3883 	case WM_T_82575:
   3884 	case WM_T_82576:
   3885 	case WM_T_82580:
   3886 	case WM_T_I350:
   3887 	case WM_T_I354:
   3888 	case WM_T_I210:
   3889 	case WM_T_I211:
   3890 		if (sc->sc_type == WM_T_82571) {
   3891 			/* Only 82571 shares port 0 */
   3892 			mask = EEMNGCTL_CFGDONE_0;
   3893 		} else
   3894 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3895 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3896 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3897 				break;
   3898 			delay(1000);
   3899 		}
   3900 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3901 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3902 				device_xname(sc->sc_dev), __func__));
   3903 		}
   3904 		break;
   3905 	case WM_T_ICH8:
   3906 	case WM_T_ICH9:
   3907 	case WM_T_ICH10:
   3908 	case WM_T_PCH:
   3909 	case WM_T_PCH2:
   3910 	case WM_T_PCH_LPT:
   3911 	case WM_T_PCH_SPT:
   3912 	case WM_T_PCH_CNP:
   3913 		delay(10*1000);
   3914 		if (sc->sc_type >= WM_T_ICH10)
   3915 			wm_lan_init_done(sc);
   3916 		else
   3917 			wm_get_auto_rd_done(sc);
   3918 
   3919 		/* Clear PHY Reset Asserted bit */
   3920 		reg = CSR_READ(sc, WMREG_STATUS);
   3921 		if ((reg & STATUS_PHYRA) != 0)
   3922 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3923 		break;
   3924 	default:
   3925 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3926 		    __func__);
   3927 		break;
   3928 	}
   3929 }
   3930 
   3931 void
   3932 wm_phy_post_reset(struct wm_softc *sc)
   3933 {
   3934 	uint32_t reg;
   3935 
   3936 	/* This function is only for ICH8 and newer. */
   3937 	if (sc->sc_type < WM_T_ICH8)
   3938 		return;
   3939 
   3940 	if (wm_phy_resetisblocked(sc)) {
   3941 		/* XXX */
   3942 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3943 		return;
   3944 	}
   3945 
   3946 	/* Allow time for h/w to get to quiescent state after reset */
   3947 	delay(10*1000);
   3948 
   3949 	/* Perform any necessary post-reset workarounds */
   3950 	if (sc->sc_type == WM_T_PCH)
   3951 		wm_hv_phy_workaround_ich8lan(sc);
   3952 	else if (sc->sc_type == WM_T_PCH2)
   3953 		wm_lv_phy_workaround_ich8lan(sc);
   3954 
   3955 	/* Clear the host wakeup bit after lcd reset */
   3956 	if (sc->sc_type >= WM_T_PCH) {
   3957 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3958 		    BM_PORT_GEN_CFG);
   3959 		reg &= ~BM_WUC_HOST_WU_BIT;
   3960 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3961 		    BM_PORT_GEN_CFG, reg);
   3962 	}
   3963 
   3964 	/* Configure the LCD with the extended configuration region in NVM */
   3965 	wm_init_lcd_from_nvm(sc);
   3966 
   3967 	/* Configure the LCD with the OEM bits in NVM */
   3968 	wm_oem_bits_config_ich8lan(sc, true);
   3969 
   3970 	if (sc->sc_type == WM_T_PCH2) {
   3971 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3972 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3973 			delay(10 * 1000);
   3974 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3975 		}
   3976 		/* XXX Set EEE LPI Update Timer to 200usec */
   3977 	}
   3978 }
   3979 
   3980 /* Only for PCH and newer */
   3981 static int
   3982 wm_write_smbus_addr(struct wm_softc *sc)
   3983 {
   3984 	uint32_t strap, freq;
   3985 	uint16_t phy_data;
   3986 	int rv;
   3987 
   3988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3989 		device_xname(sc->sc_dev), __func__));
   3990 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3991 
   3992 	strap = CSR_READ(sc, WMREG_STRAP);
   3993 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3994 
   3995 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3996 	if (rv != 0)
   3997 		return -1;
   3998 
   3999 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4000 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4001 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4002 
   4003 	if (sc->sc_phytype == WMPHY_I217) {
   4004 		/* Restore SMBus frequency */
    4005 		if (freq--) {
   4006 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4007 			    | HV_SMB_ADDR_FREQ_HIGH);
   4008 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4009 			    HV_SMB_ADDR_FREQ_LOW);
   4010 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4011 			    HV_SMB_ADDR_FREQ_HIGH);
   4012 		} else {
   4013 			DPRINTF(WM_DEBUG_INIT,
   4014 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4015 				device_xname(sc->sc_dev), __func__));
   4016 		}
   4017 	}
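         	/*
         	 * Illustrative example: on an I217 PHY with STRAP_FREQ
         	 * reading 2, freq-- leaves 1, so HV_SMB_ADDR_FREQ_LOW is set
         	 * and HV_SMB_ADDR_FREQ_HIGH is cleared before the register is
         	 * written back below.
         	 */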
   4018 
   4019 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4020 	    phy_data);
   4021 }
   4022 
   4023 void
   4024 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4025 {
   4026 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4027 	uint16_t phy_page = 0;
   4028 
   4029 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4030 		device_xname(sc->sc_dev), __func__));
   4031 
   4032 	switch (sc->sc_type) {
   4033 	case WM_T_ICH8:
   4034 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4035 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4036 			return;
   4037 
   4038 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4039 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4040 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4041 			break;
   4042 		}
   4043 		/* FALLTHROUGH */
   4044 	case WM_T_PCH:
   4045 	case WM_T_PCH2:
   4046 	case WM_T_PCH_LPT:
   4047 	case WM_T_PCH_SPT:
   4048 	case WM_T_PCH_CNP:
   4049 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4050 		break;
   4051 	default:
   4052 		return;
   4053 	}
   4054 
   4055 	sc->phy.acquire(sc);
   4056 
   4057 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4058 	if ((reg & sw_cfg_mask) == 0)
   4059 		goto release;
   4060 
   4061 	/*
   4062 	 * Make sure HW does not configure LCD from PHY extended configuration
   4063 	 * before SW configuration
   4064 	 */
   4065 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4066 	if ((sc->sc_type < WM_T_PCH2)
   4067 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4068 		goto release;
   4069 
   4070 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4071 		device_xname(sc->sc_dev), __func__));
   4072 	/* word_addr is in DWORD */
   4073 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4074 
   4075 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4076 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4077 	if (cnf_size == 0)
   4078 		goto release;
   4079 
   4080 	if (((sc->sc_type == WM_T_PCH)
   4081 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4082 	    || (sc->sc_type > WM_T_PCH)) {
   4083 		/*
   4084 		 * HW configures the SMBus address and LEDs when the OEM and
   4085 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4086 		 * are cleared, SW will configure them instead.
   4087 		 */
   4088 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4089 			device_xname(sc->sc_dev), __func__));
   4090 		wm_write_smbus_addr(sc);
   4091 
   4092 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4093 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4094 	}
   4095 
   4096 	/* Configure LCD from extended configuration region. */
   4097 	for (i = 0; i < cnf_size; i++) {
   4098 		uint16_t reg_data, reg_addr;
   4099 
   4100 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4101 			goto release;
   4102 
    4103 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4104 			goto release;
   4105 
   4106 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4107 			phy_page = reg_data;
   4108 
   4109 		reg_addr &= IGPHY_MAXREGADDR;
   4110 		reg_addr |= phy_page;
   4111 
   4112 		KASSERT(sc->phy.writereg_locked != NULL);
   4113 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4114 	}
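         	/*
         	 * Descriptive note: each extended configuration entry above
         	 * is a (data, address) pair of NVM words; entries addressing
         	 * MII_IGPHY_PAGE_SELECT update phy_page so that subsequent
         	 * register writes land on the selected PHY page.
         	 */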
   4115 
   4116 release:
   4117 	sc->phy.release(sc);
   4118 	return;
   4119 }
   4120 
   4121 /*
   4122  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4123  *  @sc:       pointer to the HW structure
    4124  *  @d0_state: true if entering D0, false if entering D3
   4125  *
   4126  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4127  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4128  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4129  */
   4130 int
   4131 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4132 {
   4133 	uint32_t mac_reg;
   4134 	uint16_t oem_reg;
   4135 	int rv;
   4136 
   4137 	if (sc->sc_type < WM_T_PCH)
   4138 		return 0;
   4139 
   4140 	rv = sc->phy.acquire(sc);
   4141 	if (rv != 0)
   4142 		return rv;
   4143 
   4144 	if (sc->sc_type == WM_T_PCH) {
   4145 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4146 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4147 			goto release;
   4148 	}
   4149 
   4150 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4151 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4152 		goto release;
   4153 
   4154 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4155 
   4156 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4157 	if (rv != 0)
   4158 		goto release;
   4159 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4160 
   4161 	if (d0_state) {
   4162 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4163 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4164 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4165 			oem_reg |= HV_OEM_BITS_LPLU;
   4166 	} else {
   4167 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4168 		    != 0)
   4169 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4170 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4171 		    != 0)
   4172 			oem_reg |= HV_OEM_BITS_LPLU;
   4173 	}
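         	/*
         	 * Illustrative example: entering D0 with PHY_CTRL_GBE_DIS set
         	 * in PHY_CTRL results in HV_OEM_BITS_A1KDIS being set above,
         	 * i.e. gigabit stays disabled across the power state change.
         	 */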
   4174 
   4175 	/* Set Restart auto-neg to activate the bits */
   4176 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4177 	    && (wm_phy_resetisblocked(sc) == false))
   4178 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4179 
   4180 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4181 
   4182 release:
   4183 	sc->phy.release(sc);
   4184 
   4185 	return rv;
   4186 }
   4187 
   4188 /* Init hardware bits */
   4189 void
   4190 wm_initialize_hardware_bits(struct wm_softc *sc)
   4191 {
   4192 	uint32_t tarc0, tarc1, reg;
   4193 
   4194 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4195 		device_xname(sc->sc_dev), __func__));
   4196 
   4197 	/* For 82571 variant, 80003 and ICHs */
   4198 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4199 	    || (sc->sc_type >= WM_T_80003)) {
   4200 
   4201 		/* Transmit Descriptor Control 0 */
   4202 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4203 		reg |= TXDCTL_COUNT_DESC;
   4204 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4205 
   4206 		/* Transmit Descriptor Control 1 */
   4207 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4208 		reg |= TXDCTL_COUNT_DESC;
   4209 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4210 
   4211 		/* TARC0 */
   4212 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4213 		switch (sc->sc_type) {
   4214 		case WM_T_82571:
   4215 		case WM_T_82572:
   4216 		case WM_T_82573:
   4217 		case WM_T_82574:
   4218 		case WM_T_82583:
   4219 		case WM_T_80003:
   4220 			/* Clear bits 30..27 */
   4221 			tarc0 &= ~__BITS(30, 27);
   4222 			break;
   4223 		default:
   4224 			break;
   4225 		}
   4226 
   4227 		switch (sc->sc_type) {
   4228 		case WM_T_82571:
   4229 		case WM_T_82572:
   4230 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4231 
   4232 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4233 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4234 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4235 			/* 8257[12] Errata No.7 */
    4236 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4237 
   4238 			/* TARC1 bit 28 */
   4239 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4240 				tarc1 &= ~__BIT(28);
   4241 			else
   4242 				tarc1 |= __BIT(28);
   4243 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4244 
   4245 			/*
   4246 			 * 8257[12] Errata No.13
    4247 			 * Disable Dynamic Clock Gating.
   4248 			 */
   4249 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4250 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4251 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4252 			break;
   4253 		case WM_T_82573:
   4254 		case WM_T_82574:
   4255 		case WM_T_82583:
   4256 			if ((sc->sc_type == WM_T_82574)
   4257 			    || (sc->sc_type == WM_T_82583))
   4258 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4259 
   4260 			/* Extended Device Control */
   4261 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4262 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4263 			reg |= __BIT(22);	/* Set bit 22 */
   4264 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4265 
   4266 			/* Device Control */
   4267 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4268 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4269 
   4270 			/* PCIe Control Register */
   4271 			/*
   4272 			 * 82573 Errata (unknown).
   4273 			 *
   4274 			 * 82574 Errata 25 and 82583 Errata 12
   4275 			 * "Dropped Rx Packets":
    4276 			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   4277 			 */
   4278 			reg = CSR_READ(sc, WMREG_GCR);
   4279 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4280 			CSR_WRITE(sc, WMREG_GCR, reg);
   4281 
   4282 			if ((sc->sc_type == WM_T_82574)
   4283 			    || (sc->sc_type == WM_T_82583)) {
   4284 				/*
   4285 				 * Document says this bit must be set for
   4286 				 * proper operation.
   4287 				 */
   4288 				reg = CSR_READ(sc, WMREG_GCR);
   4289 				reg |= __BIT(22);
   4290 				CSR_WRITE(sc, WMREG_GCR, reg);
   4291 
   4292 				/*
    4293 				 * Apply a workaround for a hardware erratum
    4294 				 * documented in the errata docs. It fixes an
    4295 				 * issue where some error prone or unreliable
    4296 				 * PCIe completions occur, particularly with
    4297 				 * ASPM enabled. Without the fix, the issue
    4298 				 * can cause Tx timeouts.
   4299 				 */
   4300 				reg = CSR_READ(sc, WMREG_GCR2);
   4301 				reg |= __BIT(0);
   4302 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4303 			}
   4304 			break;
   4305 		case WM_T_80003:
   4306 			/* TARC0 */
   4307 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4308 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4309 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4310 
   4311 			/* TARC1 bit 28 */
   4312 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4313 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4314 				tarc1 &= ~__BIT(28);
   4315 			else
   4316 				tarc1 |= __BIT(28);
   4317 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4318 			break;
   4319 		case WM_T_ICH8:
   4320 		case WM_T_ICH9:
   4321 		case WM_T_ICH10:
   4322 		case WM_T_PCH:
   4323 		case WM_T_PCH2:
   4324 		case WM_T_PCH_LPT:
   4325 		case WM_T_PCH_SPT:
   4326 		case WM_T_PCH_CNP:
   4327 			/* TARC0 */
   4328 			if (sc->sc_type == WM_T_ICH8) {
   4329 				/* Set TARC0 bits 29 and 28 */
   4330 				tarc0 |= __BITS(29, 28);
   4331 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4332 				tarc0 |= __BIT(29);
   4333 				/*
   4334 				 *  Drop bit 28. From Linux.
   4335 				 * See I218/I219 spec update
   4336 				 * "5. Buffer Overrun While the I219 is
   4337 				 * Processing DMA Transactions"
   4338 				 */
   4339 				tarc0 &= ~__BIT(28);
   4340 			}
   4341 			/* Set TARC0 bits 23,24,26,27 */
   4342 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4343 
   4344 			/* CTRL_EXT */
   4345 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4346 			reg |= __BIT(22);	/* Set bit 22 */
   4347 			/*
   4348 			 * Enable PHY low-power state when MAC is at D3
   4349 			 * w/o WoL
   4350 			 */
   4351 			if (sc->sc_type >= WM_T_PCH)
   4352 				reg |= CTRL_EXT_PHYPDEN;
   4353 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4354 
   4355 			/* TARC1 */
   4356 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4357 			/* bit 28 */
   4358 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4359 				tarc1 &= ~__BIT(28);
   4360 			else
   4361 				tarc1 |= __BIT(28);
   4362 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4363 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4364 
   4365 			/* Device Status */
   4366 			if (sc->sc_type == WM_T_ICH8) {
   4367 				reg = CSR_READ(sc, WMREG_STATUS);
   4368 				reg &= ~__BIT(31);
   4369 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4370 
   4371 			}
   4372 
   4373 			/* IOSFPC */
   4374 			if (sc->sc_type == WM_T_PCH_SPT) {
   4375 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4376 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4377 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4378 			}
   4379 			/*
    4380 			 * To work around a descriptor data corruption issue
    4381 			 * with NFS v2 UDP traffic, just disable the NFS
    4382 			 * filtering capability.
   4383 			 */
   4384 			reg = CSR_READ(sc, WMREG_RFCTL);
   4385 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4386 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4387 			break;
   4388 		default:
   4389 			break;
   4390 		}
   4391 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4392 
   4393 		switch (sc->sc_type) {
   4394 		/*
   4395 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4396 		 * Avoid RSS Hash Value bug.
   4397 		 */
   4398 		case WM_T_82571:
   4399 		case WM_T_82572:
   4400 		case WM_T_82573:
   4401 		case WM_T_80003:
   4402 		case WM_T_ICH8:
   4403 			reg = CSR_READ(sc, WMREG_RFCTL);
    4404 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4405 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4406 			break;
   4407 		case WM_T_82574:
    4408 			/* Use extended Rx descriptors. */
   4409 			reg = CSR_READ(sc, WMREG_RFCTL);
   4410 			reg |= WMREG_RFCTL_EXSTEN;
   4411 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4412 			break;
   4413 		default:
   4414 			break;
   4415 		}
   4416 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4417 		/*
   4418 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4419 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4420 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4421 		 * Correctly by the Device"
   4422 		 *
   4423 		 * I354(C2000) Errata AVR53:
   4424 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4425 		 * Hang"
   4426 		 */
   4427 		reg = CSR_READ(sc, WMREG_RFCTL);
   4428 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4429 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4430 	}
   4431 }
   4432 
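         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Descriptive note: translate an 82580 RXPBS value through
          *	wm_82580_rxpbs_table; out-of-range values map to 0.
          */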
   4433 static uint32_t
   4434 wm_rxpbs_adjust_82580(uint32_t val)
   4435 {
   4436 	uint32_t rv = 0;
   4437 
   4438 	if (val < __arraycount(wm_82580_rxpbs_table))
   4439 		rv = wm_82580_rxpbs_table[val];
   4440 
   4441 	return rv;
   4442 }
   4443 
   4444 /*
   4445  * wm_reset_phy:
   4446  *
   4447  *	generic PHY reset function.
   4448  *	Same as e1000_phy_hw_reset_generic()
   4449  */
   4450 static int
   4451 wm_reset_phy(struct wm_softc *sc)
   4452 {
   4453 	uint32_t reg;
   4454 
   4455 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4456 		device_xname(sc->sc_dev), __func__));
   4457 	if (wm_phy_resetisblocked(sc))
   4458 		return -1;
   4459 
   4460 	sc->phy.acquire(sc);
   4461 
   4462 	reg = CSR_READ(sc, WMREG_CTRL);
   4463 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4464 	CSR_WRITE_FLUSH(sc);
   4465 
   4466 	delay(sc->phy.reset_delay_us);
   4467 
   4468 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4469 	CSR_WRITE_FLUSH(sc);
   4470 
   4471 	delay(150);
   4472 
   4473 	sc->phy.release(sc);
   4474 
   4475 	wm_get_cfg_done(sc);
   4476 	wm_phy_post_reset(sc);
   4477 
   4478 	return 0;
   4479 }
   4480 
   4481 /*
   4482  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4483  * so it is enough to check sc->sc_queue[0] only.
   4484  */
   4485 static void
   4486 wm_flush_desc_rings(struct wm_softc *sc)
   4487 {
   4488 	pcireg_t preg;
   4489 	uint32_t reg;
   4490 	struct wm_txqueue *txq;
   4491 	wiseman_txdesc_t *txd;
   4492 	int nexttx;
   4493 	uint32_t rctl;
   4494 
   4495 	/* First, disable MULR fix in FEXTNVM11 */
   4496 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4497 	reg |= FEXTNVM11_DIS_MULRFIX;
   4498 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4499 
   4500 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4501 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4502 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4503 		return;
   4504 
   4505 	/* TX */
   4506 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4507 	    device_xname(sc->sc_dev), preg, reg);
   4508 	reg = CSR_READ(sc, WMREG_TCTL);
   4509 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4510 
   4511 	txq = &sc->sc_queue[0].wmq_txq;
   4512 	nexttx = txq->txq_next;
   4513 	txd = &txq->txq_descs[nexttx];
   4514 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4515 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4516 	txd->wtx_fields.wtxu_status = 0;
   4517 	txd->wtx_fields.wtxu_options = 0;
   4518 	txd->wtx_fields.wtxu_vlan = 0;
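         	/*
         	 * Note for readers: the descriptor built above is a dummy
         	 * 512-byte transmit with only the insert-FCS command flag
         	 * set; its sole purpose is to give the hardware a descriptor
         	 * to consume so the pending flush can complete.
         	 */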
   4519 
   4520 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4521 	    BUS_SPACE_BARRIER_WRITE);
   4522 
   4523 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4524 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4525 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4526 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4527 	delay(250);
   4528 
   4529 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4530 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4531 		return;
   4532 
   4533 	/* RX */
   4534 	printf("%s: Need RX flush (reg = %08x)\n",
   4535 	    device_xname(sc->sc_dev), preg);
   4536 	rctl = CSR_READ(sc, WMREG_RCTL);
   4537 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4538 	CSR_WRITE_FLUSH(sc);
   4539 	delay(150);
   4540 
   4541 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
    4542 	/* Zero the lower 14 bits (prefetch and host thresholds). */
   4543 	reg &= 0xffffc000;
    4544 	/*
    4545 	 * Update thresholds: set the prefetch threshold to 31 and the
    4546 	 * host threshold to 1, and make sure the granularity is
    4547 	 * "descriptors" rather than "cache lines".
    4548 	 */
   4549 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4550 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
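         	/*
         	 * For reference (a sketch of the layout implied by the mask
         	 * above, not a register definition): the prefetch threshold
         	 * occupies the low bits (0x1f -> 31) and the host threshold
         	 * starts at bit 8 (1 << 8 -> 1), which is why the lower 14
         	 * bits are cleared first.
         	 */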
   4551 
    4552 	/*
    4553 	 * Momentarily enable the RX ring for the changes to take
    4554 	 * effect.
    4555 	 */
   4556 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4557 	CSR_WRITE_FLUSH(sc);
   4558 	delay(150);
   4559 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4560 }
   4561 
   4562 /*
   4563  * wm_reset:
   4564  *
    4565  *	Reset the chip to a known state.
   4566  */
   4567 static void
   4568 wm_reset(struct wm_softc *sc)
   4569 {
   4570 	int phy_reset = 0;
   4571 	int i, error = 0;
   4572 	uint32_t reg;
   4573 	uint16_t kmreg;
   4574 	int rv;
   4575 
   4576 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4577 		device_xname(sc->sc_dev), __func__));
   4578 	KASSERT(sc->sc_type != 0);
   4579 
   4580 	/*
   4581 	 * Allocate on-chip memory according to the MTU size.
   4582 	 * The Packet Buffer Allocation register must be written
   4583 	 * before the chip is reset.
   4584 	 */
   4585 	switch (sc->sc_type) {
   4586 	case WM_T_82547:
   4587 	case WM_T_82547_2:
   4588 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4589 		    PBA_22K : PBA_30K;
   4590 		for (i = 0; i < sc->sc_nqueues; i++) {
   4591 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4592 			txq->txq_fifo_head = 0;
   4593 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4594 			txq->txq_fifo_size =
   4595 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4596 			txq->txq_fifo_stall = 0;
   4597 		}
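         		/*
         		 * Worked example (a sketch): with a jumbo MTU, sc_pba is
         		 * PBA_22K for receive, so the TX FIFO computed above gets
         		 * the remaining PBA_40K - PBA_22K = 18KB of packet buffer.
         		 */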
   4598 		break;
   4599 	case WM_T_82571:
   4600 	case WM_T_82572:
   4601 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4602 	case WM_T_80003:
   4603 		sc->sc_pba = PBA_32K;
   4604 		break;
   4605 	case WM_T_82573:
   4606 		sc->sc_pba = PBA_12K;
   4607 		break;
   4608 	case WM_T_82574:
   4609 	case WM_T_82583:
   4610 		sc->sc_pba = PBA_20K;
   4611 		break;
   4612 	case WM_T_82576:
   4613 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4614 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4615 		break;
   4616 	case WM_T_82580:
   4617 	case WM_T_I350:
   4618 	case WM_T_I354:
   4619 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4620 		break;
   4621 	case WM_T_I210:
   4622 	case WM_T_I211:
   4623 		sc->sc_pba = PBA_34K;
   4624 		break;
   4625 	case WM_T_ICH8:
   4626 		/* Workaround for a bit corruption issue in FIFO memory */
   4627 		sc->sc_pba = PBA_8K;
   4628 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4629 		break;
   4630 	case WM_T_ICH9:
   4631 	case WM_T_ICH10:
   4632 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4633 		    PBA_14K : PBA_10K;
   4634 		break;
   4635 	case WM_T_PCH:
   4636 	case WM_T_PCH2:	/* XXX 14K? */
   4637 	case WM_T_PCH_LPT:
   4638 	case WM_T_PCH_SPT:
   4639 	case WM_T_PCH_CNP:
   4640 		sc->sc_pba = PBA_26K;
   4641 		break;
   4642 	default:
   4643 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4644 		    PBA_40K : PBA_48K;
   4645 		break;
   4646 	}
   4647 	/*
    4648 	 * Only old or non-multiqueue devices have the PBA register.
   4649 	 * XXX Need special handling for 82575.
   4650 	 */
   4651 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4652 	    || (sc->sc_type == WM_T_82575))
   4653 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4654 
   4655 	/* Prevent the PCI-E bus from sticking */
   4656 	if (sc->sc_flags & WM_F_PCIE) {
   4657 		int timeout = 800;
   4658 
   4659 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4660 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4661 
   4662 		while (timeout--) {
   4663 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4664 			    == 0)
   4665 				break;
   4666 			delay(100);
   4667 		}
   4668 		if (timeout == 0)
   4669 			device_printf(sc->sc_dev,
   4670 			    "failed to disable busmastering\n");
   4671 	}
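         	/* The loop above bounds the wait at roughly 800 * 100us = 80ms. */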
   4672 
   4673 	/* Set the completion timeout for interface */
   4674 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4675 	    || (sc->sc_type == WM_T_82580)
   4676 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4677 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4678 		wm_set_pcie_completion_timeout(sc);
   4679 
   4680 	/* Clear interrupt */
   4681 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4682 	if (wm_is_using_msix(sc)) {
   4683 		if (sc->sc_type != WM_T_82574) {
   4684 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4685 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4686 		} else
   4687 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4688 	}
   4689 
   4690 	/* Stop the transmit and receive processes. */
   4691 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4692 	sc->sc_rctl &= ~RCTL_EN;
   4693 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4694 	CSR_WRITE_FLUSH(sc);
   4695 
   4696 	/* XXX set_tbi_sbp_82543() */
   4697 
   4698 	delay(10*1000);
   4699 
   4700 	/* Must acquire the MDIO ownership before MAC reset */
   4701 	switch (sc->sc_type) {
   4702 	case WM_T_82573:
   4703 	case WM_T_82574:
   4704 	case WM_T_82583:
   4705 		error = wm_get_hw_semaphore_82573(sc);
   4706 		break;
   4707 	default:
   4708 		break;
   4709 	}
   4710 
   4711 	/*
   4712 	 * 82541 Errata 29? & 82547 Errata 28?
    4713 	 * See also the description of the PHY_RST bit in the CTRL
    4714 	 * register in 8254x_GBe_SDM.pdf.
   4715 	 */
   4716 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4717 		CSR_WRITE(sc, WMREG_CTRL,
   4718 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4719 		CSR_WRITE_FLUSH(sc);
   4720 		delay(5000);
   4721 	}
   4722 
   4723 	switch (sc->sc_type) {
   4724 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4725 	case WM_T_82541:
   4726 	case WM_T_82541_2:
   4727 	case WM_T_82547:
   4728 	case WM_T_82547_2:
   4729 		/*
   4730 		 * On some chipsets, a reset through a memory-mapped write
   4731 		 * cycle can cause the chip to reset before completing the
    4732 		 * write cycle. This causes a major headache that can be
    4733 		 * avoided by issuing the reset via indirect register writes
    4734 		 * through I/O space.
   4735 		 *
   4736 		 * So, if we successfully mapped the I/O BAR at attach time,
   4737 		 * use that. Otherwise, try our luck with a memory-mapped
   4738 		 * reset.
   4739 		 */
   4740 		if (sc->sc_flags & WM_F_IOH_VALID)
   4741 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4742 		else
   4743 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4744 		break;
   4745 	case WM_T_82545_3:
   4746 	case WM_T_82546_3:
   4747 		/* Use the shadow control register on these chips. */
   4748 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4749 		break;
   4750 	case WM_T_80003:
   4751 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4752 		sc->phy.acquire(sc);
   4753 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4754 		sc->phy.release(sc);
   4755 		break;
   4756 	case WM_T_ICH8:
   4757 	case WM_T_ICH9:
   4758 	case WM_T_ICH10:
   4759 	case WM_T_PCH:
   4760 	case WM_T_PCH2:
   4761 	case WM_T_PCH_LPT:
   4762 	case WM_T_PCH_SPT:
   4763 	case WM_T_PCH_CNP:
   4764 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4765 		if (wm_phy_resetisblocked(sc) == false) {
   4766 			/*
   4767 			 * Gate automatic PHY configuration by hardware on
   4768 			 * non-managed 82579
   4769 			 */
   4770 			if ((sc->sc_type == WM_T_PCH2)
   4771 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4772 				== 0))
   4773 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4774 
   4775 			reg |= CTRL_PHY_RESET;
   4776 			phy_reset = 1;
   4777 		} else
   4778 			printf("XXX reset is blocked!!!\n");
   4779 		sc->phy.acquire(sc);
   4780 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4781 		/* Don't insert a completion barrier when resetting */
   4782 		delay(20*1000);
   4783 		mutex_exit(sc->sc_ich_phymtx);
   4784 		break;
   4785 	case WM_T_82580:
   4786 	case WM_T_I350:
   4787 	case WM_T_I354:
   4788 	case WM_T_I210:
   4789 	case WM_T_I211:
   4790 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4791 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4792 			CSR_WRITE_FLUSH(sc);
   4793 		delay(5000);
   4794 		break;
   4795 	case WM_T_82542_2_0:
   4796 	case WM_T_82542_2_1:
   4797 	case WM_T_82543:
   4798 	case WM_T_82540:
   4799 	case WM_T_82545:
   4800 	case WM_T_82546:
   4801 	case WM_T_82571:
   4802 	case WM_T_82572:
   4803 	case WM_T_82573:
   4804 	case WM_T_82574:
   4805 	case WM_T_82575:
   4806 	case WM_T_82576:
   4807 	case WM_T_82583:
   4808 	default:
   4809 		/* Everything else can safely use the documented method. */
   4810 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4811 		break;
   4812 	}
   4813 
   4814 	/* Must release the MDIO ownership after MAC reset */
   4815 	switch (sc->sc_type) {
   4816 	case WM_T_82573:
   4817 	case WM_T_82574:
   4818 	case WM_T_82583:
   4819 		if (error == 0)
   4820 			wm_put_hw_semaphore_82573(sc);
   4821 		break;
   4822 	default:
   4823 		break;
   4824 	}
   4825 
    4826 	/* Set the PHY Config Counter to 50ms */
   4827 	if (sc->sc_type == WM_T_PCH2) {
   4828 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4829 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4830 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4831 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4832 	}
   4833 
   4834 	if (phy_reset != 0)
   4835 		wm_get_cfg_done(sc);
   4836 
   4837 	/* reload EEPROM */
   4838 	switch (sc->sc_type) {
   4839 	case WM_T_82542_2_0:
   4840 	case WM_T_82542_2_1:
   4841 	case WM_T_82543:
   4842 	case WM_T_82544:
   4843 		delay(10);
   4844 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4845 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4846 		CSR_WRITE_FLUSH(sc);
   4847 		delay(2000);
   4848 		break;
   4849 	case WM_T_82540:
   4850 	case WM_T_82545:
   4851 	case WM_T_82545_3:
   4852 	case WM_T_82546:
   4853 	case WM_T_82546_3:
   4854 		delay(5*1000);
   4855 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4856 		break;
   4857 	case WM_T_82541:
   4858 	case WM_T_82541_2:
   4859 	case WM_T_82547:
   4860 	case WM_T_82547_2:
   4861 		delay(20000);
   4862 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4863 		break;
   4864 	case WM_T_82571:
   4865 	case WM_T_82572:
   4866 	case WM_T_82573:
   4867 	case WM_T_82574:
   4868 	case WM_T_82583:
   4869 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4870 			delay(10);
   4871 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4872 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4873 			CSR_WRITE_FLUSH(sc);
   4874 		}
   4875 		/* check EECD_EE_AUTORD */
   4876 		wm_get_auto_rd_done(sc);
   4877 		/*
    4878 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
    4879 		 * is set.
   4880 		 */
   4881 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4882 		    || (sc->sc_type == WM_T_82583))
   4883 			delay(25*1000);
   4884 		break;
   4885 	case WM_T_82575:
   4886 	case WM_T_82576:
   4887 	case WM_T_82580:
   4888 	case WM_T_I350:
   4889 	case WM_T_I354:
   4890 	case WM_T_I210:
   4891 	case WM_T_I211:
   4892 	case WM_T_80003:
   4893 		/* check EECD_EE_AUTORD */
   4894 		wm_get_auto_rd_done(sc);
   4895 		break;
   4896 	case WM_T_ICH8:
   4897 	case WM_T_ICH9:
   4898 	case WM_T_ICH10:
   4899 	case WM_T_PCH:
   4900 	case WM_T_PCH2:
   4901 	case WM_T_PCH_LPT:
   4902 	case WM_T_PCH_SPT:
   4903 	case WM_T_PCH_CNP:
   4904 		break;
   4905 	default:
   4906 		panic("%s: unknown type\n", __func__);
   4907 	}
   4908 
   4909 	/* Check whether EEPROM is present or not */
   4910 	switch (sc->sc_type) {
   4911 	case WM_T_82575:
   4912 	case WM_T_82576:
   4913 	case WM_T_82580:
   4914 	case WM_T_I350:
   4915 	case WM_T_I354:
   4916 	case WM_T_ICH8:
   4917 	case WM_T_ICH9:
   4918 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4919 			/* Not found */
   4920 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4921 			if (sc->sc_type == WM_T_82575)
   4922 				wm_reset_init_script_82575(sc);
   4923 		}
   4924 		break;
   4925 	default:
   4926 		break;
   4927 	}
   4928 
   4929 	if (phy_reset != 0)
   4930 		wm_phy_post_reset(sc);
   4931 
   4932 	if ((sc->sc_type == WM_T_82580)
   4933 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4934 		/* clear global device reset status bit */
   4935 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4936 	}
   4937 
   4938 	/* Clear any pending interrupt events. */
   4939 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4940 	reg = CSR_READ(sc, WMREG_ICR);
   4941 	if (wm_is_using_msix(sc)) {
   4942 		if (sc->sc_type != WM_T_82574) {
   4943 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4944 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4945 		} else
   4946 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4947 	}
   4948 
   4949 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4950 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4951 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4952 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4953 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4954 		reg |= KABGTXD_BGSQLBIAS;
   4955 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4956 	}
   4957 
   4958 	/* reload sc_ctrl */
   4959 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4960 
   4961 	if (sc->sc_type == WM_T_I354) {
   4962 #if 0
   4963 		/* I354 uses an external PHY */
   4964 		wm_set_eee_i354(sc);
   4965 #endif
   4966 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4967 		wm_set_eee_i350(sc);
   4968 
   4969 	/*
   4970 	 * For PCH, this write will make sure that any noise will be detected
   4971 	 * as a CRC error and be dropped rather than show up as a bad packet
   4972 	 * to the DMA engine
   4973 	 */
   4974 	if (sc->sc_type == WM_T_PCH)
   4975 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4976 
   4977 	if (sc->sc_type >= WM_T_82544)
   4978 		CSR_WRITE(sc, WMREG_WUC, 0);
   4979 
   4980 	if (sc->sc_type < WM_T_82575)
   4981 		wm_disable_aspm(sc);
   4982 
   4983 	wm_reset_mdicnfg_82580(sc);
   4984 
   4985 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4986 		wm_pll_workaround_i210(sc);
   4987 
   4988 	if (sc->sc_type == WM_T_80003) {
   4989 		/* default to TRUE to enable the MDIC W/A */
   4990 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4991 
   4992 		rv = wm_kmrn_readreg(sc,
   4993 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4994 		if (rv == 0) {
   4995 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4996 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4997 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4998 			else
   4999 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5000 		}
   5001 	}
   5002 }
   5003 
   5004 /*
   5005  * wm_add_rxbuf:
   5006  *
    5007  *	Add a receive buffer to the indicated descriptor.
   5008  */
   5009 static int
   5010 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5011 {
   5012 	struct wm_softc *sc = rxq->rxq_sc;
   5013 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5014 	struct mbuf *m;
   5015 	int error;
   5016 
   5017 	KASSERT(mutex_owned(rxq->rxq_lock));
   5018 
   5019 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5020 	if (m == NULL)
   5021 		return ENOBUFS;
   5022 
   5023 	MCLGET(m, M_DONTWAIT);
   5024 	if ((m->m_flags & M_EXT) == 0) {
   5025 		m_freem(m);
   5026 		return ENOBUFS;
   5027 	}
   5028 
   5029 	if (rxs->rxs_mbuf != NULL)
   5030 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5031 
   5032 	rxs->rxs_mbuf = m;
   5033 
   5034 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5035 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5036 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5037 	if (error) {
   5038 		/* XXX XXX XXX */
   5039 		aprint_error_dev(sc->sc_dev,
   5040 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5041 		panic("wm_add_rxbuf");
   5042 	}
   5043 
   5044 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5045 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5046 
   5047 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5048 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5049 			wm_init_rxdesc(rxq, idx);
   5050 	} else
   5051 		wm_init_rxdesc(rxq, idx);
   5052 
   5053 	return 0;
   5054 }
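         /*
          * Usage sketch (hypothetical caller, for illustration only): a full
          * ring refill would hold rxq_lock and tolerate ENOBUFS, e.g.:
          *
          *	mutex_enter(rxq->rxq_lock);
          *	for (i = 0; i < WM_NRXDESC; i++)
          *		if (wm_add_rxbuf(rxq, i) != 0)
          *			break;	(out of mbufs; retry later)
          *	mutex_exit(rxq->rxq_lock);
          */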
   5055 
   5056 /*
   5057  * wm_rxdrain:
   5058  *
   5059  *	Drain the receive queue.
   5060  */
   5061 static void
   5062 wm_rxdrain(struct wm_rxqueue *rxq)
   5063 {
   5064 	struct wm_softc *sc = rxq->rxq_sc;
   5065 	struct wm_rxsoft *rxs;
   5066 	int i;
   5067 
   5068 	KASSERT(mutex_owned(rxq->rxq_lock));
   5069 
   5070 	for (i = 0; i < WM_NRXDESC; i++) {
   5071 		rxs = &rxq->rxq_soft[i];
   5072 		if (rxs->rxs_mbuf != NULL) {
   5073 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5074 			m_freem(rxs->rxs_mbuf);
   5075 			rxs->rxs_mbuf = NULL;
   5076 		}
   5077 	}
   5078 }
   5079 
   5080 /*
    5081  * Set up registers for RSS.
    5082  *
    5083  * XXX VMDq is not yet supported.
   5084  */
   5085 static void
   5086 wm_init_rss(struct wm_softc *sc)
   5087 {
   5088 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5089 	int i;
   5090 
   5091 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5092 
   5093 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5094 		int qid, reta_ent;
   5095 
   5096 		qid  = i % sc->sc_nqueues;
   5097 		switch (sc->sc_type) {
   5098 		case WM_T_82574:
   5099 			reta_ent = __SHIFTIN(qid,
   5100 			    RETA_ENT_QINDEX_MASK_82574);
   5101 			break;
   5102 		case WM_T_82575:
   5103 			reta_ent = __SHIFTIN(qid,
   5104 			    RETA_ENT_QINDEX1_MASK_82575);
   5105 			break;
   5106 		default:
   5107 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5108 			break;
   5109 		}
   5110 
   5111 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5112 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5113 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5114 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5115 	}
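         	/*
         	 * The net effect (a sketch): with, say, sc_nqueues = 4, the
         	 * RETA entries written above cycle 0, 1, 2, 3, 0, 1, ... so
         	 * RSS hash values spread evenly across the four RX queues.
         	 */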
   5116 
   5117 	rss_getkey((uint8_t *)rss_key);
   5118 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5119 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5120 
   5121 	if (sc->sc_type == WM_T_82574)
   5122 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5123 	else
   5124 		mrqc = MRQC_ENABLE_RSS_MQ;
   5125 
   5126 	/*
    5127 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
    5128 	 * See the IPV6EXDIS bit in wm_initialize_hardware_bits().
   5129 	 */
   5130 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5131 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5132 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5133 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5134 
   5135 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5136 }
   5137 
   5138 /*
    5139  * Adjust the TX and RX queue numbers which the system actually uses.
    5140  *
    5141  * The numbers are affected by the parameters below:
    5142  *     - The number of hardware queues
   5143  *     - The number of MSI-X vectors (= "nvectors" argument)
   5144  *     - ncpu
   5145  */
   5146 static void
   5147 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5148 {
   5149 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5150 
   5151 	if (nvectors < 2) {
   5152 		sc->sc_nqueues = 1;
   5153 		return;
   5154 	}
   5155 
   5156 	switch (sc->sc_type) {
   5157 	case WM_T_82572:
   5158 		hw_ntxqueues = 2;
   5159 		hw_nrxqueues = 2;
   5160 		break;
   5161 	case WM_T_82574:
   5162 		hw_ntxqueues = 2;
   5163 		hw_nrxqueues = 2;
   5164 		break;
   5165 	case WM_T_82575:
   5166 		hw_ntxqueues = 4;
   5167 		hw_nrxqueues = 4;
   5168 		break;
   5169 	case WM_T_82576:
   5170 		hw_ntxqueues = 16;
   5171 		hw_nrxqueues = 16;
   5172 		break;
   5173 	case WM_T_82580:
   5174 	case WM_T_I350:
   5175 	case WM_T_I354:
   5176 		hw_ntxqueues = 8;
   5177 		hw_nrxqueues = 8;
   5178 		break;
   5179 	case WM_T_I210:
   5180 		hw_ntxqueues = 4;
   5181 		hw_nrxqueues = 4;
   5182 		break;
   5183 	case WM_T_I211:
   5184 		hw_ntxqueues = 2;
   5185 		hw_nrxqueues = 2;
   5186 		break;
   5187 		/*
    5188 		 * As the Ethernet controllers below do not support MSI-X,
    5189 		 * this driver does not use multiqueue on them:
   5190 		 *     - WM_T_80003
   5191 		 *     - WM_T_ICH8
   5192 		 *     - WM_T_ICH9
   5193 		 *     - WM_T_ICH10
   5194 		 *     - WM_T_PCH
   5195 		 *     - WM_T_PCH2
   5196 		 *     - WM_T_PCH_LPT
   5197 		 */
   5198 	default:
   5199 		hw_ntxqueues = 1;
   5200 		hw_nrxqueues = 1;
   5201 		break;
   5202 	}
   5203 
   5204 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5205 
   5206 	/*
    5207 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5208 	 * we limit the number of queues actually used.
   5209 	 */
   5210 	if (nvectors < hw_nqueues + 1)
   5211 		sc->sc_nqueues = nvectors - 1;
   5212 	else
   5213 		sc->sc_nqueues = hw_nqueues;
   5214 
   5215 	/*
    5216 	 * Since more queues than CPUs cannot improve scaling, we limit
    5217 	 * the number of queues actually used.
   5218 	 */
   5219 	if (ncpu < sc->sc_nqueues)
   5220 		sc->sc_nqueues = ncpu;
   5221 }
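         /*
          * Worked example for the clamping above (hypothetical numbers): on an
          * 82576 (16 hardware TX/RX queues) attached with nvectors = 5 and
          * ncpu = 4, hw_nqueues is 16; the vector clamp yields nvectors - 1 = 4
          * queues (one vector is reserved for the link interrupt), and the CPU
          * clamp then leaves sc_nqueues at 4.
          */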
   5222 
   5223 static inline bool
   5224 wm_is_using_msix(struct wm_softc *sc)
   5225 {
   5226 
   5227 	return (sc->sc_nintrs > 1);
   5228 }
   5229 
   5230 static inline bool
   5231 wm_is_using_multiqueue(struct wm_softc *sc)
   5232 {
   5233 
   5234 	return (sc->sc_nqueues > 1);
   5235 }
   5236 
   5237 static int
   5238 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5239 {
   5240 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5241 	wmq->wmq_id = qidx;
   5242 	wmq->wmq_intr_idx = intr_idx;
   5243 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5244 #ifdef WM_MPSAFE
   5245 	    | SOFTINT_MPSAFE
   5246 #endif
   5247 	    , wm_handle_queue, wmq);
   5248 	if (wmq->wmq_si != NULL)
   5249 		return 0;
   5250 
   5251 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5252 	    wmq->wmq_id);
   5253 
   5254 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5255 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5256 	return ENOMEM;
   5257 }
   5258 
   5259 /*
    5260  * Both single-interrupt MSI and INTx can use this function.
   5261  */
   5262 static int
   5263 wm_setup_legacy(struct wm_softc *sc)
   5264 {
   5265 	pci_chipset_tag_t pc = sc->sc_pc;
   5266 	const char *intrstr = NULL;
   5267 	char intrbuf[PCI_INTRSTR_LEN];
   5268 	int error;
   5269 
   5270 	error = wm_alloc_txrx_queues(sc);
   5271 	if (error) {
   5272 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5273 		    error);
   5274 		return ENOMEM;
   5275 	}
   5276 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5277 	    sizeof(intrbuf));
   5278 #ifdef WM_MPSAFE
   5279 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5280 #endif
   5281 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5282 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5283 	if (sc->sc_ihs[0] == NULL) {
   5284 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5285 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5286 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5287 		return ENOMEM;
   5288 	}
   5289 
   5290 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5291 	sc->sc_nintrs = 1;
   5292 
   5293 	return wm_softint_establish(sc, 0, 0);
   5294 }
   5295 
   5296 static int
   5297 wm_setup_msix(struct wm_softc *sc)
   5298 {
   5299 	void *vih;
   5300 	kcpuset_t *affinity;
   5301 	int qidx, error, intr_idx, txrx_established;
   5302 	pci_chipset_tag_t pc = sc->sc_pc;
   5303 	const char *intrstr = NULL;
   5304 	char intrbuf[PCI_INTRSTR_LEN];
   5305 	char intr_xname[INTRDEVNAMEBUF];
   5306 
   5307 	if (sc->sc_nqueues < ncpu) {
   5308 		/*
   5309 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5310 		 * interrupts starts at CPU#1.
   5311 		 */
   5312 		sc->sc_affinity_offset = 1;
   5313 	} else {
   5314 		/*
    5315 		 * In this case, this device uses all CPUs, so align the
    5316 		 * affinity cpu_index with the MSI-X vector for readability.
   5317 		 */
   5318 		sc->sc_affinity_offset = 0;
   5319 	}
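         	/*
         	 * For example (a sketch): with ncpu = 8 and four queues, the
         	 * offset is 1, so the round-robin below pins queue 0 to CPU#1,
         	 * queue 1 to CPU#2, and so on, keeping CPU#0 free for other
         	 * devices' interrupts.
         	 */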
   5320 
   5321 	error = wm_alloc_txrx_queues(sc);
   5322 	if (error) {
   5323 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5324 		    error);
   5325 		return ENOMEM;
   5326 	}
   5327 
   5328 	kcpuset_create(&affinity, false);
   5329 	intr_idx = 0;
   5330 
   5331 	/*
   5332 	 * TX and RX
   5333 	 */
   5334 	txrx_established = 0;
   5335 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5336 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5337 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5338 
   5339 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5340 		    sizeof(intrbuf));
   5341 #ifdef WM_MPSAFE
   5342 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5343 		    PCI_INTR_MPSAFE, true);
   5344 #endif
   5345 		memset(intr_xname, 0, sizeof(intr_xname));
   5346 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5347 		    device_xname(sc->sc_dev), qidx);
   5348 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5349 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5350 		if (vih == NULL) {
   5351 			aprint_error_dev(sc->sc_dev,
   5352 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5353 			    intrstr ? " at " : "",
   5354 			    intrstr ? intrstr : "");
   5355 
   5356 			goto fail;
   5357 		}
   5358 		kcpuset_zero(affinity);
   5359 		/* Round-robin affinity */
   5360 		kcpuset_set(affinity, affinity_to);
   5361 		error = interrupt_distribute(vih, affinity, NULL);
   5362 		if (error == 0) {
   5363 			aprint_normal_dev(sc->sc_dev,
   5364 			    "for TX and RX interrupting at %s affinity to %u\n",
   5365 			    intrstr, affinity_to);
   5366 		} else {
   5367 			aprint_normal_dev(sc->sc_dev,
   5368 			    "for TX and RX interrupting at %s\n", intrstr);
   5369 		}
   5370 		sc->sc_ihs[intr_idx] = vih;
   5371 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5372 			goto fail;
   5373 		txrx_established++;
   5374 		intr_idx++;
   5375 	}
   5376 
   5377 	/*
   5378 	 * LINK
   5379 	 */
   5380 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5381 	    sizeof(intrbuf));
   5382 #ifdef WM_MPSAFE
   5383 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5384 #endif
   5385 	memset(intr_xname, 0, sizeof(intr_xname));
   5386 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5387 	    device_xname(sc->sc_dev));
   5388 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5389 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5390 	if (vih == NULL) {
   5391 		aprint_error_dev(sc->sc_dev,
   5392 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5393 		    intrstr ? " at " : "",
   5394 		    intrstr ? intrstr : "");
   5395 
   5396 		goto fail;
   5397 	}
    5398 	/* Keep the default affinity for the LINK interrupt */
   5399 	aprint_normal_dev(sc->sc_dev,
   5400 	    "for LINK interrupting at %s\n", intrstr);
   5401 	sc->sc_ihs[intr_idx] = vih;
   5402 	sc->sc_link_intr_idx = intr_idx;
   5403 
   5404 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5405 	kcpuset_destroy(affinity);
   5406 	return 0;
   5407 
   5408  fail:
   5409 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5410 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5411 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5412 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5413 	}
   5414 
   5415 	kcpuset_destroy(affinity);
   5416 	return ENOMEM;
   5417 }
   5418 
   5419 static void
   5420 wm_unset_stopping_flags(struct wm_softc *sc)
   5421 {
   5422 	int i;
   5423 
   5424 	KASSERT(WM_CORE_LOCKED(sc));
   5425 
   5426 	/*
    5427 	 * Must unset stopping flags in ascending order.
   5428 	 */
   5429 	for (i = 0; i < sc->sc_nqueues; i++) {
   5430 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5431 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5432 
   5433 		mutex_enter(txq->txq_lock);
   5434 		txq->txq_stopping = false;
   5435 		mutex_exit(txq->txq_lock);
   5436 
   5437 		mutex_enter(rxq->rxq_lock);
   5438 		rxq->rxq_stopping = false;
   5439 		mutex_exit(rxq->rxq_lock);
   5440 	}
   5441 
   5442 	sc->sc_core_stopping = false;
   5443 }
   5444 
   5445 static void
   5446 wm_set_stopping_flags(struct wm_softc *sc)
   5447 {
   5448 	int i;
   5449 
   5450 	KASSERT(WM_CORE_LOCKED(sc));
   5451 
   5452 	sc->sc_core_stopping = true;
   5453 
   5454 	/*
    5455 	 * Must set stopping flags in ascending order.
   5456 	 */
   5457 	for (i = 0; i < sc->sc_nqueues; i++) {
   5458 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5459 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5460 
   5461 		mutex_enter(rxq->rxq_lock);
   5462 		rxq->rxq_stopping = true;
   5463 		mutex_exit(rxq->rxq_lock);
   5464 
   5465 		mutex_enter(txq->txq_lock);
   5466 		txq->txq_stopping = true;
   5467 		mutex_exit(txq->txq_lock);
   5468 	}
   5469 }
   5470 
   5471 /*
    5472  * Write the interrupt interval value to ITR or EITR.
   5473  */
   5474 static void
   5475 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5476 {
   5477 
   5478 	if (!wmq->wmq_set_itr)
   5479 		return;
   5480 
   5481 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5482 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5483 
   5484 		/*
    5485 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5486 		 * the counter field in software.
   5487 		 */
   5488 		if (sc->sc_type == WM_T_82575)
   5489 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5490 		else
   5491 			eitr |= EITR_CNT_INGR;
   5492 
   5493 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5494 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5495 		/*
    5496 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5497 		 * the multiqueue function with MSI-X.
   5498 		 */
   5499 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5500 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5501 	} else {
   5502 		KASSERT(wmq->wmq_id == 0);
   5503 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5504 	}
   5505 
   5506 	wmq->wmq_set_itr = false;
   5507 }
   5508 
   5509 /*
   5510  * TODO
    5511  * The dynamic ITR calculation below is almost the same as Linux
    5512  * igb's; however, it does not fit wm(4), so AIM is disabled until we
    5513  * find an appropriate ITR calculation.
   5514  */
   5515 /*
    5516  * Calculate the interrupt interval value to be written by
    5517  * wm_itrs_writereg(). This function does not write ITR/EITR itself.
   5518  */
   5519 static void
   5520 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5521 {
   5522 #ifdef NOTYET
   5523 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5524 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5525 	uint32_t avg_size = 0;
   5526 	uint32_t new_itr;
   5527 
   5528 	if (rxq->rxq_packets)
   5529 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5530 	if (txq->txq_packets)
   5531 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5532 
   5533 	if (avg_size == 0) {
   5534 		new_itr = 450; /* restore default value */
   5535 		goto out;
   5536 	}
   5537 
   5538 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5539 	avg_size += 24;
   5540 
   5541 	/* Don't starve jumbo frames */
   5542 	avg_size = uimin(avg_size, 3000);
   5543 
   5544 	/* Give a little boost to mid-size frames */
   5545 	if ((avg_size > 300) && (avg_size < 1200))
   5546 		new_itr = avg_size / 3;
   5547 	else
   5548 		new_itr = avg_size / 2;
   5549 
   5550 out:
   5551 	/*
    5552 	 * The 82574's and 82575's EITR usage differs from other NEWQUEUE
    5553 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   5554 	 */
   5555 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5556 		new_itr *= 4;
   5557 
   5558 	if (new_itr != wmq->wmq_itr) {
   5559 		wmq->wmq_itr = new_itr;
   5560 		wmq->wmq_set_itr = true;
   5561 	} else
   5562 		wmq->wmq_set_itr = false;
   5563 
   5564 	rxq->rxq_packets = 0;
   5565 	rxq->rxq_bytes = 0;
   5566 	txq->txq_packets = 0;
   5567 	txq->txq_bytes = 0;
   5568 #endif
   5569 }
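         /*
          * For reference, a worked pass through the disabled AIM logic above
          * (hypothetical traffic): an average frame of 876 bytes becomes 900
          * after the 24-byte CRC/preamble/gap adjustment, falls into the
          * mid-size boost range (300 < 900 < 1200), yields new_itr = 900 / 3 =
          * 300, and is then quadrupled on every controller except the 82575.
          */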
   5570 
   5571 /*
   5572  * wm_init:		[ifnet interface function]
   5573  *
   5574  *	Initialize the interface.
   5575  */
   5576 static int
   5577 wm_init(struct ifnet *ifp)
   5578 {
   5579 	struct wm_softc *sc = ifp->if_softc;
   5580 	int ret;
   5581 
   5582 	WM_CORE_LOCK(sc);
   5583 	ret = wm_init_locked(ifp);
   5584 	WM_CORE_UNLOCK(sc);
   5585 
   5586 	return ret;
   5587 }
   5588 
   5589 static int
   5590 wm_init_locked(struct ifnet *ifp)
   5591 {
   5592 	struct wm_softc *sc = ifp->if_softc;
   5593 	int i, j, trynum, error = 0;
   5594 	uint32_t reg;
   5595 
   5596 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5597 		device_xname(sc->sc_dev), __func__));
   5598 	KASSERT(WM_CORE_LOCKED(sc));
   5599 
   5600 	/*
    5601 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5602 	 * There is a small but measurable benefit to avoiding the adjustment
   5603 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5604 	 * on such platforms.  One possibility is that the DMA itself is
   5605 	 * slightly more efficient if the front of the entire packet (instead
   5606 	 * of the front of the headers) is aligned.
   5607 	 *
   5608 	 * Note we must always set align_tweak to 0 if we are using
   5609 	 * jumbo frames.
   5610 	 */
   5611 #ifdef __NO_STRICT_ALIGNMENT
   5612 	sc->sc_align_tweak = 0;
   5613 #else
   5614 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5615 		sc->sc_align_tweak = 0;
   5616 	else
   5617 		sc->sc_align_tweak = 2;
   5618 #endif /* __NO_STRICT_ALIGNMENT */
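         	/*
         	 * Rationale (for reference): the two-byte tweak makes the
         	 * 14-byte Ethernet header end on a 4-byte boundary, so the IP
         	 * header that follows is naturally aligned on strict-alignment
         	 * platforms.
         	 */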
   5619 
   5620 	/* Cancel any pending I/O. */
   5621 	wm_stop_locked(ifp, 0);
   5622 
   5623 	/* update statistics before reset */
   5624 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5625 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5626 
   5627 	/* PCH_SPT hardware workaround */
   5628 	if (sc->sc_type == WM_T_PCH_SPT)
   5629 		wm_flush_desc_rings(sc);
   5630 
   5631 	/* Reset the chip to a known state. */
   5632 	wm_reset(sc);
   5633 
   5634 	/*
   5635 	 * AMT based hardware can now take control from firmware
   5636 	 * Do this after reset.
   5637 	 */
   5638 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5639 		wm_get_hw_control(sc);
   5640 
   5641 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5642 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5643 		wm_legacy_irq_quirk_spt(sc);
   5644 
   5645 	/* Init hardware bits */
   5646 	wm_initialize_hardware_bits(sc);
   5647 
   5648 	/* Reset the PHY. */
   5649 	if (sc->sc_flags & WM_F_HAS_MII)
   5650 		wm_gmii_reset(sc);
   5651 
   5652 	if (sc->sc_type >= WM_T_ICH8) {
   5653 		reg = CSR_READ(sc, WMREG_GCR);
   5654 		/*
    5655 		 * The ICH8 no-snoop bits have the opposite polarity. Set to
    5656 		 * snoop by default after reset.
   5657 		 */
   5658 		if (sc->sc_type == WM_T_ICH8)
   5659 			reg |= GCR_NO_SNOOP_ALL;
   5660 		else
   5661 			reg &= ~GCR_NO_SNOOP_ALL;
   5662 		CSR_WRITE(sc, WMREG_GCR, reg);
   5663 	}
   5664 	if ((sc->sc_type >= WM_T_ICH8)
   5665 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5666 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5667 
   5668 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5669 		reg |= CTRL_EXT_RO_DIS;
   5670 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5671 	}
   5672 
   5673 	/* Calculate (E)ITR value */
   5674 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5675 		/*
    5676 		 * For NEWQUEUE's EITR (except for the 82575).
    5677 		 * The 82575's EITR should be set to the same throttling
    5678 		 * value as other old controllers' ITR because the
    5679 		 * interrupt/sec calculation is the same, that is,
    5680 		 * 1,000,000,000 / (N * 256).
    5681 		 *
    5682 		 * The 82574's EITR should be set to the same throttling
    5683 		 * value as ITR. For N interrupts/sec, set this value to
    5684 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5685 		 */
   5686 		sc->sc_itr_init = 450;
   5687 	} else if (sc->sc_type >= WM_T_82543) {
   5688 		/*
   5689 		 * Set up the interrupt throttling register (units of 256ns)
   5690 		 * Note that a footnote in Intel's documentation says this
   5691 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5692 		 * or 10Mbit mode.  Empirically, this also appears to be true
    5693 		 * for the 1024ns units of the other
   5694 		 * interrupt-related timer registers -- so, really, we ought
   5695 		 * to divide this value by 4 when the link speed is low.
   5696 		 *
   5697 		 * XXX implement this division at link speed change!
   5698 		 */
   5699 
   5700 		/*
   5701 		 * For N interrupts/sec, set this value to:
   5702 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5703 		 * absolute and packet timer values to this value
   5704 		 * divided by 4 to get "simple timer" behavior.
   5705 		 */
   5706 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5707 	}
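         	/*
         	 * Worked arithmetic for the two formulas above (a sketch;
         	 * exact hardware rounding may differ): an EITR value of 450
         	 * gives about 1,000,000 / 450 ~= 2222 interrupts/sec, while
         	 * an ITR value of 1500 in 256ns units gives
         	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
         	 * matching the "2604 ints/sec" note above.
         	 */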
   5708 
   5709 	error = wm_init_txrx_queues(sc);
   5710 	if (error)
   5711 		goto out;
   5712 
   5713 	/*
   5714 	 * Clear out the VLAN table -- we don't use it (yet).
   5715 	 */
   5716 	CSR_WRITE(sc, WMREG_VET, 0);
   5717 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5718 		trynum = 10; /* Due to hw errata */
   5719 	else
   5720 		trynum = 1;
   5721 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5722 		for (j = 0; j < trynum; j++)
   5723 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5724 
   5725 	/*
   5726 	 * Set up flow-control parameters.
   5727 	 *
   5728 	 * XXX Values could probably stand some tuning.
   5729 	 */
   5730 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5731 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5732 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5733 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5734 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5735 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5736 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5737 	}
   5738 
   5739 	sc->sc_fcrtl = FCRTL_DFLT;
   5740 	if (sc->sc_type < WM_T_82543) {
   5741 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5742 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5743 	} else {
   5744 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5745 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5746 	}
   5747 
   5748 	if (sc->sc_type == WM_T_80003)
   5749 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5750 	else
   5751 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5752 
   5753 	/* Writes the control register. */
   5754 	wm_set_vlan(sc);
   5755 
   5756 	if (sc->sc_flags & WM_F_HAS_MII) {
   5757 		uint16_t kmreg;
   5758 
   5759 		switch (sc->sc_type) {
   5760 		case WM_T_80003:
   5761 		case WM_T_ICH8:
   5762 		case WM_T_ICH9:
   5763 		case WM_T_ICH10:
   5764 		case WM_T_PCH:
   5765 		case WM_T_PCH2:
   5766 		case WM_T_PCH_LPT:
   5767 		case WM_T_PCH_SPT:
   5768 		case WM_T_PCH_CNP:
   5769 			/*
    5770 			 * Set the MAC to wait the maximum time between each
    5771 			 * iteration and increase the maximum iterations when
    5772 			 * polling the PHY; this fixes erroneous timeouts at
    5773 			 * 10Mbps.
   5774 			 */
   5775 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5776 			    0xFFFF);
   5777 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5778 			    &kmreg);
   5779 			kmreg |= 0x3F;
   5780 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5781 			    kmreg);
   5782 			break;
   5783 		default:
   5784 			break;
   5785 		}
   5786 
   5787 		if (sc->sc_type == WM_T_80003) {
   5788 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5789 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5790 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5791 
   5792 			/* Bypass RX and TX FIFO's */
   5793 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5794 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5795 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5796 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5797 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5798 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5799 		}
   5800 	}
   5801 #if 0
   5802 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5803 #endif
   5804 
   5805 	/* Set up checksum offload parameters. */
   5806 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5807 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5808 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5809 		reg |= RXCSUM_IPOFL;
   5810 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5811 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5812 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5813 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5814 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5815 
   5816 	/* Set registers about MSI-X */
   5817 	if (wm_is_using_msix(sc)) {
   5818 		uint32_t ivar;
   5819 		struct wm_queue *wmq;
   5820 		int qid, qintr_idx;
   5821 
   5822 		if (sc->sc_type == WM_T_82575) {
   5823 			/* Interrupt control */
   5824 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5825 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5826 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5827 
   5828 			/* TX and RX */
   5829 			for (i = 0; i < sc->sc_nqueues; i++) {
   5830 				wmq = &sc->sc_queue[i];
   5831 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5832 				    EITR_TX_QUEUE(wmq->wmq_id)
   5833 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5834 			}
   5835 			/* Link status */
   5836 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5837 			    EITR_OTHER);
   5838 		} else if (sc->sc_type == WM_T_82574) {
   5839 			/* Interrupt control */
   5840 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5841 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5842 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5843 
   5844 			/*
    5845 			 * Work around an issue with spurious interrupts in
    5846 			 * MSI-X mode. At wm_initialize_hardware_bits(),
    5847 			 * sc_nintrs has not been initialized yet, so
    5848 			 * re-initialize WMREG_RFCTL here.
   5849 			 */
   5850 			reg = CSR_READ(sc, WMREG_RFCTL);
   5851 			reg |= WMREG_RFCTL_ACKDIS;
   5852 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5853 
   5854 			ivar = 0;
   5855 			/* TX and RX */
   5856 			for (i = 0; i < sc->sc_nqueues; i++) {
   5857 				wmq = &sc->sc_queue[i];
   5858 				qid = wmq->wmq_id;
   5859 				qintr_idx = wmq->wmq_intr_idx;
   5860 
   5861 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5862 				    IVAR_TX_MASK_Q_82574(qid));
   5863 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5864 				    IVAR_RX_MASK_Q_82574(qid));
   5865 			}
   5866 			/* Link status */
   5867 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5868 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5869 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5870 		} else {
   5871 			/* Interrupt control */
   5872 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5873 			    | GPIE_EIAME | GPIE_PBA);
   5874 
   5875 			switch (sc->sc_type) {
   5876 			case WM_T_82580:
   5877 			case WM_T_I350:
   5878 			case WM_T_I354:
   5879 			case WM_T_I210:
   5880 			case WM_T_I211:
   5881 				/* TX and RX */
   5882 				for (i = 0; i < sc->sc_nqueues; i++) {
   5883 					wmq = &sc->sc_queue[i];
   5884 					qid = wmq->wmq_id;
   5885 					qintr_idx = wmq->wmq_intr_idx;
   5886 
   5887 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5888 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5889 					ivar |= __SHIFTIN((qintr_idx
   5890 						| IVAR_VALID),
   5891 					    IVAR_TX_MASK_Q(qid));
   5892 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5893 					ivar |= __SHIFTIN((qintr_idx
   5894 						| IVAR_VALID),
   5895 					    IVAR_RX_MASK_Q(qid));
   5896 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5897 				}
   5898 				break;
   5899 			case WM_T_82576:
   5900 				/* TX and RX */
   5901 				for (i = 0; i < sc->sc_nqueues; i++) {
   5902 					wmq = &sc->sc_queue[i];
   5903 					qid = wmq->wmq_id;
   5904 					qintr_idx = wmq->wmq_intr_idx;
   5905 
   5906 					ivar = CSR_READ(sc,
   5907 					    WMREG_IVAR_Q_82576(qid));
   5908 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5909 					ivar |= __SHIFTIN((qintr_idx
   5910 						| IVAR_VALID),
   5911 					    IVAR_TX_MASK_Q_82576(qid));
   5912 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5913 					ivar |= __SHIFTIN((qintr_idx
   5914 						| IVAR_VALID),
   5915 					    IVAR_RX_MASK_Q_82576(qid));
   5916 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5917 					    ivar);
   5918 				}
   5919 				break;
   5920 			default:
   5921 				break;
   5922 			}
   5923 
   5924 			/* Link status */
   5925 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5926 			    IVAR_MISC_OTHER);
   5927 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5928 		}
   5929 
   5930 		if (wm_is_using_multiqueue(sc)) {
   5931 			wm_init_rss(sc);
   5932 
    5933 			/*
    5934 			 * NOTE: Receive Full-Packet Checksum Offload is
    5935 			 * mutually exclusive with Multiqueue. However,
    5936 			 * this is not the same as TCP/IP checksums, which
    5937 			 * still work.
    5938 			 */
   5939 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5940 			reg |= RXCSUM_PCSD;
   5941 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5942 		}
   5943 	}
   5944 
   5945 	/* Set up the interrupt registers. */
   5946 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5947 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5948 	    ICR_RXO | ICR_RXT0;
   5949 	if (wm_is_using_msix(sc)) {
   5950 		uint32_t mask;
   5951 		struct wm_queue *wmq;
   5952 
   5953 		switch (sc->sc_type) {
   5954 		case WM_T_82574:
   5955 			mask = 0;
   5956 			for (i = 0; i < sc->sc_nqueues; i++) {
   5957 				wmq = &sc->sc_queue[i];
   5958 				mask |= ICR_TXQ(wmq->wmq_id);
   5959 				mask |= ICR_RXQ(wmq->wmq_id);
   5960 			}
   5961 			mask |= ICR_OTHER;
   5962 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5963 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5964 			break;
   5965 		default:
   5966 			if (sc->sc_type == WM_T_82575) {
   5967 				mask = 0;
   5968 				for (i = 0; i < sc->sc_nqueues; i++) {
   5969 					wmq = &sc->sc_queue[i];
   5970 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5971 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5972 				}
   5973 				mask |= EITR_OTHER;
   5974 			} else {
   5975 				mask = 0;
   5976 				for (i = 0; i < sc->sc_nqueues; i++) {
   5977 					wmq = &sc->sc_queue[i];
   5978 					mask |= 1 << wmq->wmq_intr_idx;
   5979 				}
   5980 				mask |= 1 << sc->sc_link_intr_idx;
   5981 			}
   5982 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5983 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5984 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5985 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5986 			break;
   5987 		}
   5988 	} else
   5989 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5990 
   5991 	/* Set up the inter-packet gap. */
   5992 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5993 
   5994 	if (sc->sc_type >= WM_T_82543) {
   5995 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5996 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5997 			wm_itrs_writereg(sc, wmq);
   5998 		}
   5999 		/*
    6000 		 * Link interrupts occur much less frequently than TX and
    6001 		 * RX interrupts, so we don't tune the
    6002 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
    6003 		 * does.
   6004 		 */
   6005 	}
   6006 
   6007 	/* Set the VLAN ethernetype. */
   6008 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6009 
   6010 	/*
   6011 	 * Set up the transmit control register; we start out with
    6012 	 * a collision distance suitable for FDX, but update it when
   6013 	 * we resolve the media type.
   6014 	 */
   6015 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6016 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6017 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6018 	if (sc->sc_type >= WM_T_82571)
   6019 		sc->sc_tctl |= TCTL_MULR;
   6020 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6021 
   6022 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6023 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6024 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6025 	}
   6026 
   6027 	if (sc->sc_type == WM_T_80003) {
   6028 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6029 		reg &= ~TCTL_EXT_GCEX_MASK;
   6030 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6031 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6032 	}
   6033 
   6034 	/* Set the media. */
   6035 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6036 		goto out;
   6037 
   6038 	/* Configure for OS presence */
   6039 	wm_init_manageability(sc);
   6040 
   6041 	/*
   6042 	 * Set up the receive control register; we actually program the
   6043 	 * register when we set the receive filter. Use multicast address
   6044 	 * offset type 0.
   6045 	 *
   6046 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6047 	 * don't enable that feature.
   6048 	 */
   6049 	sc->sc_mchash_type = 0;
   6050 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6051 	    | RCTL_MO(sc->sc_mchash_type);
   6052 
   6053 	/*
    6054 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   6055 	 */
   6056 	if (sc->sc_type == WM_T_82574)
   6057 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6058 
   6059 	/*
   6060 	 * The I350 has a bug where it always strips the CRC whether
    6061 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6062 	 */
   6063 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6064 	    || (sc->sc_type == WM_T_I210))
   6065 		sc->sc_rctl |= RCTL_SECRC;
   6066 
   6067 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6068 	    && (ifp->if_mtu > ETHERMTU)) {
   6069 		sc->sc_rctl |= RCTL_LPE;
   6070 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6071 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6072 	}
   6073 
   6074 	if (MCLBYTES == 2048)
   6075 		sc->sc_rctl |= RCTL_2k;
   6076 	else {
   6077 		if (sc->sc_type >= WM_T_82543) {
   6078 			switch (MCLBYTES) {
   6079 			case 4096:
   6080 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6081 				break;
   6082 			case 8192:
   6083 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6084 				break;
   6085 			case 16384:
   6086 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6087 				break;
   6088 			default:
   6089 				panic("wm_init: MCLBYTES %d unsupported",
   6090 				    MCLBYTES);
   6091 				break;
   6092 			}
   6093 		} else
   6094 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6095 	}
   6096 
   6097 	/* Enable ECC */
   6098 	switch (sc->sc_type) {
   6099 	case WM_T_82571:
   6100 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6101 		reg |= PBA_ECC_CORR_EN;
   6102 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6103 		break;
   6104 	case WM_T_PCH_LPT:
   6105 	case WM_T_PCH_SPT:
   6106 	case WM_T_PCH_CNP:
   6107 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6108 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6109 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6110 
   6111 		sc->sc_ctrl |= CTRL_MEHE;
   6112 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6113 		break;
   6114 	default:
   6115 		break;
   6116 	}
   6117 
   6118 	/*
   6119 	 * Set the receive filter.
   6120 	 *
   6121 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6122 	 * the setting of RCTL.EN in wm_set_filter()
   6123 	 */
   6124 	wm_set_filter(sc);
   6125 
    6126 	/* On 82575 and later, set RDT only if RX is enabled */
   6127 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6128 		int qidx;
   6129 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6130 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6131 			for (i = 0; i < WM_NRXDESC; i++) {
   6132 				mutex_enter(rxq->rxq_lock);
   6133 				wm_init_rxdesc(rxq, i);
   6134 				mutex_exit(rxq->rxq_lock);
   6135 
   6136 			}
   6137 		}
   6138 	}
   6139 
   6140 	wm_unset_stopping_flags(sc);
   6141 
   6142 	/* Start the one second link check clock. */
   6143 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6144 
   6145 	/* ...all done! */
   6146 	ifp->if_flags |= IFF_RUNNING;
   6147 	ifp->if_flags &= ~IFF_OACTIVE;
   6148 
   6149  out:
   6150 	sc->sc_if_flags = ifp->if_flags;
   6151 	if (error)
   6152 		log(LOG_ERR, "%s: interface not running\n",
   6153 		    device_xname(sc->sc_dev));
   6154 	return error;
   6155 }
   6156 
   6157 /*
   6158  * wm_stop:		[ifnet interface function]
   6159  *
   6160  *	Stop transmission on the interface.
   6161  */
   6162 static void
   6163 wm_stop(struct ifnet *ifp, int disable)
   6164 {
   6165 	struct wm_softc *sc = ifp->if_softc;
   6166 
   6167 	WM_CORE_LOCK(sc);
   6168 	wm_stop_locked(ifp, disable);
   6169 	WM_CORE_UNLOCK(sc);
   6170 }
   6171 
   6172 static void
   6173 wm_stop_locked(struct ifnet *ifp, int disable)
   6174 {
   6175 	struct wm_softc *sc = ifp->if_softc;
   6176 	struct wm_txsoft *txs;
   6177 	int i, qidx;
   6178 
   6179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6180 		device_xname(sc->sc_dev), __func__));
   6181 	KASSERT(WM_CORE_LOCKED(sc));
   6182 
   6183 	wm_set_stopping_flags(sc);
   6184 
   6185 	/* Stop the one second clock. */
   6186 	callout_stop(&sc->sc_tick_ch);
   6187 
   6188 	/* Stop the 82547 Tx FIFO stall check timer. */
   6189 	if (sc->sc_type == WM_T_82547)
   6190 		callout_stop(&sc->sc_txfifo_ch);
   6191 
   6192 	if (sc->sc_flags & WM_F_HAS_MII) {
   6193 		/* Down the MII. */
   6194 		mii_down(&sc->sc_mii);
   6195 	} else {
   6196 #if 0
   6197 		/* Should we clear PHY's status properly? */
   6198 		wm_reset(sc);
   6199 #endif
   6200 	}
   6201 
   6202 	/* Stop the transmit and receive processes. */
   6203 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6204 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6205 	sc->sc_rctl &= ~RCTL_EN;
   6206 
   6207 	/*
   6208 	 * Clear the interrupt mask to ensure the device cannot assert its
   6209 	 * interrupt line.
   6210 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6211 	 * service any currently pending or shared interrupt.
   6212 	 */
   6213 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6214 	sc->sc_icr = 0;
   6215 	if (wm_is_using_msix(sc)) {
   6216 		if (sc->sc_type != WM_T_82574) {
   6217 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6218 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6219 		} else
   6220 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6221 	}
   6222 
   6223 	/* Release any queued transmit buffers. */
   6224 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6225 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6226 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6227 		mutex_enter(txq->txq_lock);
   6228 		txq->txq_sending = false; /* ensure watchdog disabled */
   6229 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6230 			txs = &txq->txq_soft[i];
   6231 			if (txs->txs_mbuf != NULL) {
    6232 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6233 				m_freem(txs->txs_mbuf);
   6234 				txs->txs_mbuf = NULL;
   6235 			}
   6236 		}
   6237 		mutex_exit(txq->txq_lock);
   6238 	}
   6239 
   6240 	/* Mark the interface as down and cancel the watchdog timer. */
   6241 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6242 
   6243 	if (disable) {
   6244 		for (i = 0; i < sc->sc_nqueues; i++) {
   6245 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6246 			mutex_enter(rxq->rxq_lock);
   6247 			wm_rxdrain(rxq);
   6248 			mutex_exit(rxq->rxq_lock);
   6249 		}
   6250 	}
   6251 
   6252 #if 0 /* notyet */
   6253 	if (sc->sc_type >= WM_T_82544)
   6254 		CSR_WRITE(sc, WMREG_WUC, 0);
   6255 #endif
   6256 }
   6257 
   6258 static void
   6259 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6260 {
   6261 	struct mbuf *m;
   6262 	int i;
   6263 
   6264 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6265 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6266 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6267 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6268 		    m->m_data, m->m_len, m->m_flags);
   6269 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6270 	    i, i == 1 ? "" : "s");
   6271 }
   6272 
   6273 /*
   6274  * wm_82547_txfifo_stall:
   6275  *
   6276  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6277  *	reset the FIFO pointers, and restart packet transmission.
   6278  */
   6279 static void
   6280 wm_82547_txfifo_stall(void *arg)
   6281 {
   6282 	struct wm_softc *sc = arg;
   6283 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6284 
   6285 	mutex_enter(txq->txq_lock);
   6286 
   6287 	if (txq->txq_stopping)
   6288 		goto out;
   6289 
   6290 	if (txq->txq_fifo_stall) {
   6291 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6292 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6293 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6294 			/*
   6295 			 * Packets have drained.  Stop transmitter, reset
   6296 			 * FIFO pointers, restart transmitter, and kick
   6297 			 * the packet queue.
   6298 			 */
   6299 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6300 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6301 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6302 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6303 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6304 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6305 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6306 			CSR_WRITE_FLUSH(sc);
   6307 
   6308 			txq->txq_fifo_head = 0;
   6309 			txq->txq_fifo_stall = 0;
   6310 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6311 		} else {
   6312 			/*
   6313 			 * Still waiting for packets to drain; try again in
   6314 			 * another tick.
   6315 			 */
   6316 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6317 		}
   6318 	}
   6319 
   6320 out:
   6321 	mutex_exit(txq->txq_lock);
   6322 }
   6323 
   6324 /*
   6325  * wm_82547_txfifo_bugchk:
   6326  *
   6327  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6328  *	prevent enqueueing a packet that would wrap around the end
    6329  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6330  *
   6331  *	We do this by checking the amount of space before the end
   6332  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6333  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6334  *	the internal FIFO pointers to the beginning, and restart
   6335  *	transmission on the interface.
   6336  */
   6337 #define	WM_FIFO_HDR		0x10
   6338 #define	WM_82547_PAD_LEN	0x3e0
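/*
 * An illustrative example with assumed values: if txq_fifo_size is
 * 0x2000 and txq_fifo_head is 0x1f00, then space is 0x100.  A
 * 1500-byte packet rounds up to len = roundup(1500 + 0x10, 0x10) =
 * 0x5f0, and since 0x5f0 >= 0x3e0 + 0x100, the FIFO is stalled
 * rather than allowing the packet to wrap past the end of the ring.
 */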
   6339 static int
   6340 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6341 {
   6342 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6343 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6344 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6345 
   6346 	/* Just return if already stalled. */
   6347 	if (txq->txq_fifo_stall)
   6348 		return 1;
   6349 
   6350 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
    6351 		/* Stalls only occur in half-duplex mode; full-duplex is safe. */
   6352 		goto send_packet;
   6353 	}
   6354 
   6355 	if (len >= WM_82547_PAD_LEN + space) {
   6356 		txq->txq_fifo_stall = 1;
   6357 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6358 		return 1;
   6359 	}
   6360 
   6361  send_packet:
   6362 	txq->txq_fifo_head += len;
   6363 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6364 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6365 
   6366 	return 0;
   6367 }
   6368 
   6369 static int
   6370 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6371 {
   6372 	int error;
   6373 
   6374 	/*
   6375 	 * Allocate the control data structures, and create and load the
   6376 	 * DMA map for it.
   6377 	 *
   6378 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6379 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6380 	 * both sets within the same 4G segment.
   6381 	 */
   6382 	if (sc->sc_type < WM_T_82544)
   6383 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6384 	else
   6385 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6386 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6387 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6388 	else
   6389 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6390 
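	/*
	 * The (bus_size_t)0x100000000ULL boundary argument below asks
	 * bus_dmamem_alloc(9) for memory that does not cross a 4GB
	 * boundary, satisfying the constraint described above.
	 */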
   6391 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6392 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6393 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6394 		aprint_error_dev(sc->sc_dev,
   6395 		    "unable to allocate TX control data, error = %d\n",
   6396 		    error);
   6397 		goto fail_0;
   6398 	}
   6399 
   6400 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6401 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6402 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6403 		aprint_error_dev(sc->sc_dev,
   6404 		    "unable to map TX control data, error = %d\n", error);
   6405 		goto fail_1;
   6406 	}
   6407 
   6408 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6409 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6410 		aprint_error_dev(sc->sc_dev,
   6411 		    "unable to create TX control data DMA map, error = %d\n",
   6412 		    error);
   6413 		goto fail_2;
   6414 	}
   6415 
   6416 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6417 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6418 		aprint_error_dev(sc->sc_dev,
   6419 		    "unable to load TX control data DMA map, error = %d\n",
   6420 		    error);
   6421 		goto fail_3;
   6422 	}
   6423 
   6424 	return 0;
   6425 
   6426  fail_3:
   6427 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6428  fail_2:
   6429 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6430 	    WM_TXDESCS_SIZE(txq));
   6431  fail_1:
   6432 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6433  fail_0:
   6434 	return error;
   6435 }
   6436 
   6437 static void
   6438 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6439 {
   6440 
   6441 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6442 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6443 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6444 	    WM_TXDESCS_SIZE(txq));
   6445 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6446 }
   6447 
   6448 static int
   6449 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6450 {
   6451 	int error;
   6452 	size_t rxq_descs_size;
   6453 
   6454 	/*
   6455 	 * Allocate the control data structures, and create and load the
   6456 	 * DMA map for it.
   6457 	 *
   6458 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6459 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6460 	 * both sets within the same 4G segment.
   6461 	 */
   6462 	rxq->rxq_ndesc = WM_NRXDESC;
   6463 	if (sc->sc_type == WM_T_82574)
   6464 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6465 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6466 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6467 	else
   6468 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6469 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6470 
   6471 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6472 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6473 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6474 		aprint_error_dev(sc->sc_dev,
   6475 		    "unable to allocate RX control data, error = %d\n",
   6476 		    error);
   6477 		goto fail_0;
   6478 	}
   6479 
   6480 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6481 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6482 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6483 		aprint_error_dev(sc->sc_dev,
   6484 		    "unable to map RX control data, error = %d\n", error);
   6485 		goto fail_1;
   6486 	}
   6487 
   6488 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6489 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6490 		aprint_error_dev(sc->sc_dev,
   6491 		    "unable to create RX control data DMA map, error = %d\n",
   6492 		    error);
   6493 		goto fail_2;
   6494 	}
   6495 
   6496 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6497 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6498 		aprint_error_dev(sc->sc_dev,
   6499 		    "unable to load RX control data DMA map, error = %d\n",
   6500 		    error);
   6501 		goto fail_3;
   6502 	}
   6503 
   6504 	return 0;
   6505 
   6506  fail_3:
   6507 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6508  fail_2:
   6509 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6510 	    rxq_descs_size);
   6511  fail_1:
   6512 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6513  fail_0:
   6514 	return error;
   6515 }
   6516 
   6517 static void
   6518 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6519 {
   6520 
   6521 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6522 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6523 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6524 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6525 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6526 }
   6527 
   6528 
   6529 static int
   6530 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6531 {
   6532 	int i, error;
   6533 
   6534 	/* Create the transmit buffer DMA maps. */
   6535 	WM_TXQUEUELEN(txq) =
   6536 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6537 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6538 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6539 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6540 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6541 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6542 			aprint_error_dev(sc->sc_dev,
   6543 			    "unable to create Tx DMA map %d, error = %d\n",
   6544 			    i, error);
   6545 			goto fail;
   6546 		}
   6547 	}
   6548 
   6549 	return 0;
   6550 
   6551  fail:
   6552 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6553 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6554 			bus_dmamap_destroy(sc->sc_dmat,
   6555 			    txq->txq_soft[i].txs_dmamap);
   6556 	}
   6557 	return error;
   6558 }
   6559 
   6560 static void
   6561 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6562 {
   6563 	int i;
   6564 
   6565 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6566 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6567 			bus_dmamap_destroy(sc->sc_dmat,
   6568 			    txq->txq_soft[i].txs_dmamap);
   6569 	}
   6570 }
   6571 
   6572 static int
   6573 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6574 {
   6575 	int i, error;
   6576 
   6577 	/* Create the receive buffer DMA maps. */
   6578 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6579 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6580 			    MCLBYTES, 0, 0,
   6581 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6582 			aprint_error_dev(sc->sc_dev,
   6583 			    "unable to create Rx DMA map %d error = %d\n",
   6584 			    i, error);
   6585 			goto fail;
   6586 		}
   6587 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6588 	}
   6589 
   6590 	return 0;
   6591 
   6592  fail:
   6593 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6594 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6595 			bus_dmamap_destroy(sc->sc_dmat,
   6596 			    rxq->rxq_soft[i].rxs_dmamap);
   6597 	}
   6598 	return error;
   6599 }
   6600 
   6601 static void
   6602 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6603 {
   6604 	int i;
   6605 
   6606 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6607 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6608 			bus_dmamap_destroy(sc->sc_dmat,
   6609 			    rxq->rxq_soft[i].rxs_dmamap);
   6610 	}
   6611 }
   6612 
   6613 /*
    6614  * wm_alloc_txrx_queues:
    6615  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6616  */
   6617 static int
   6618 wm_alloc_txrx_queues(struct wm_softc *sc)
   6619 {
   6620 	int i, error, tx_done, rx_done;
   6621 
   6622 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6623 	    KM_SLEEP);
   6624 	if (sc->sc_queue == NULL) {
    6625 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6626 		error = ENOMEM;
   6627 		goto fail_0;
   6628 	}
   6629 
   6630 	/*
   6631 	 * For transmission
   6632 	 */
   6633 	error = 0;
   6634 	tx_done = 0;
   6635 	for (i = 0; i < sc->sc_nqueues; i++) {
   6636 #ifdef WM_EVENT_COUNTERS
   6637 		int j;
   6638 		const char *xname;
   6639 #endif
   6640 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6641 		txq->txq_sc = sc;
   6642 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6643 
   6644 		error = wm_alloc_tx_descs(sc, txq);
   6645 		if (error)
   6646 			break;
   6647 		error = wm_alloc_tx_buffer(sc, txq);
   6648 		if (error) {
   6649 			wm_free_tx_descs(sc, txq);
   6650 			break;
   6651 		}
   6652 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6653 		if (txq->txq_interq == NULL) {
   6654 			wm_free_tx_descs(sc, txq);
   6655 			wm_free_tx_buffer(sc, txq);
   6656 			error = ENOMEM;
   6657 			break;
   6658 		}
   6659 
   6660 #ifdef WM_EVENT_COUNTERS
   6661 		xname = device_xname(sc->sc_dev);
   6662 
   6663 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6664 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6665 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6666 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6667 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6668 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6669 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6670 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6671 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6672 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6673 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6674 
   6675 		for (j = 0; j < WM_NTXSEGS; j++) {
   6676 			snprintf(txq->txq_txseg_evcnt_names[j],
   6677 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6678 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6679 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6680 		}
   6681 
   6682 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6683 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6684 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6685 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6686 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6687 #endif /* WM_EVENT_COUNTERS */
   6688 
   6689 		tx_done++;
   6690 	}
   6691 	if (error)
   6692 		goto fail_1;
   6693 
   6694 	/*
    6695 	 * For receive
   6696 	 */
   6697 	error = 0;
   6698 	rx_done = 0;
   6699 	for (i = 0; i < sc->sc_nqueues; i++) {
   6700 #ifdef WM_EVENT_COUNTERS
   6701 		const char *xname;
   6702 #endif
   6703 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6704 		rxq->rxq_sc = sc;
   6705 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6706 
   6707 		error = wm_alloc_rx_descs(sc, rxq);
   6708 		if (error)
   6709 			break;
   6710 
   6711 		error = wm_alloc_rx_buffer(sc, rxq);
   6712 		if (error) {
   6713 			wm_free_rx_descs(sc, rxq);
   6714 			break;
   6715 		}
   6716 
   6717 #ifdef WM_EVENT_COUNTERS
   6718 		xname = device_xname(sc->sc_dev);
   6719 
   6720 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6721 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6722 
   6723 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6724 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6725 #endif /* WM_EVENT_COUNTERS */
   6726 
   6727 		rx_done++;
   6728 	}
   6729 	if (error)
   6730 		goto fail_2;
   6731 
   6732 	return 0;
   6733 
   6734  fail_2:
   6735 	for (i = 0; i < rx_done; i++) {
   6736 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6737 		wm_free_rx_buffer(sc, rxq);
   6738 		wm_free_rx_descs(sc, rxq);
   6739 		if (rxq->rxq_lock)
   6740 			mutex_obj_free(rxq->rxq_lock);
   6741 	}
   6742  fail_1:
   6743 	for (i = 0; i < tx_done; i++) {
   6744 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6745 		pcq_destroy(txq->txq_interq);
   6746 		wm_free_tx_buffer(sc, txq);
   6747 		wm_free_tx_descs(sc, txq);
   6748 		if (txq->txq_lock)
   6749 			mutex_obj_free(txq->txq_lock);
   6750 	}
   6751 
   6752 	kmem_free(sc->sc_queue,
   6753 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6754  fail_0:
   6755 	return error;
   6756 }
   6757 
   6758 /*
    6759  * wm_free_txrx_queues:
    6760  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6761  */
   6762 static void
   6763 wm_free_txrx_queues(struct wm_softc *sc)
   6764 {
   6765 	int i;
   6766 
   6767 	for (i = 0; i < sc->sc_nqueues; i++) {
   6768 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6769 
   6770 #ifdef WM_EVENT_COUNTERS
   6771 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6772 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6773 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6774 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6775 #endif /* WM_EVENT_COUNTERS */
   6776 
   6777 		wm_free_rx_buffer(sc, rxq);
   6778 		wm_free_rx_descs(sc, rxq);
   6779 		if (rxq->rxq_lock)
   6780 			mutex_obj_free(rxq->rxq_lock);
   6781 	}
   6782 
   6783 	for (i = 0; i < sc->sc_nqueues; i++) {
   6784 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6785 		struct mbuf *m;
   6786 #ifdef WM_EVENT_COUNTERS
   6787 		int j;
   6788 
   6789 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6790 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6791 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6792 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6793 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6794 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6795 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6796 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6797 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6798 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6799 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6800 
   6801 		for (j = 0; j < WM_NTXSEGS; j++)
   6802 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6803 
   6804 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6805 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6806 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6807 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6808 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6809 #endif /* WM_EVENT_COUNTERS */
   6810 
   6811 		/* drain txq_interq */
   6812 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6813 			m_freem(m);
   6814 		pcq_destroy(txq->txq_interq);
   6815 
   6816 		wm_free_tx_buffer(sc, txq);
   6817 		wm_free_tx_descs(sc, txq);
   6818 		if (txq->txq_lock)
   6819 			mutex_obj_free(txq->txq_lock);
   6820 	}
   6821 
   6822 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6823 }
   6824 
   6825 static void
   6826 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6827 {
   6828 
   6829 	KASSERT(mutex_owned(txq->txq_lock));
   6830 
   6831 	/* Initialize the transmit descriptor ring. */
   6832 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6833 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6834 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6835 	txq->txq_free = WM_NTXDESC(txq);
   6836 	txq->txq_next = 0;
   6837 }
   6838 
   6839 static void
   6840 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6841     struct wm_txqueue *txq)
   6842 {
   6843 
   6844 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6845 		device_xname(sc->sc_dev), __func__));
   6846 	KASSERT(mutex_owned(txq->txq_lock));
   6847 
   6848 	if (sc->sc_type < WM_T_82543) {
   6849 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6850 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6851 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6852 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6853 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6854 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6855 	} else {
   6856 		int qid = wmq->wmq_id;
   6857 
   6858 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6859 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6860 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6861 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6862 
   6863 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6864 			/*
   6865 			 * Don't write TDT before TCTL.EN is set.
    6866 			 * See the documentation.
   6867 			 */
   6868 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6869 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6870 			    | TXDCTL_WTHRESH(0));
   6871 		else {
   6872 			/* XXX should update with AIM? */
   6873 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6874 			if (sc->sc_type >= WM_T_82540) {
    6875 				/* Should be the same as TIDV */
   6876 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6877 			}
   6878 
   6879 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6880 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6881 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6882 		}
   6883 	}
   6884 }
   6885 
   6886 static void
   6887 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6888 {
   6889 	int i;
   6890 
   6891 	KASSERT(mutex_owned(txq->txq_lock));
   6892 
   6893 	/* Initialize the transmit job descriptors. */
   6894 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6895 		txq->txq_soft[i].txs_mbuf = NULL;
   6896 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6897 	txq->txq_snext = 0;
   6898 	txq->txq_sdirty = 0;
   6899 }
   6900 
   6901 static void
   6902 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6903     struct wm_txqueue *txq)
   6904 {
   6905 
   6906 	KASSERT(mutex_owned(txq->txq_lock));
   6907 
   6908 	/*
   6909 	 * Set up some register offsets that are different between
   6910 	 * the i82542 and the i82543 and later chips.
   6911 	 */
   6912 	if (sc->sc_type < WM_T_82543)
   6913 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6914 	else
   6915 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6916 
   6917 	wm_init_tx_descs(sc, txq);
   6918 	wm_init_tx_regs(sc, wmq, txq);
   6919 	wm_init_tx_buffer(sc, txq);
   6920 
   6921 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6922 	txq->txq_sending = false;
   6923 }
   6924 
   6925 static void
   6926 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6927     struct wm_rxqueue *rxq)
   6928 {
   6929 
   6930 	KASSERT(mutex_owned(rxq->rxq_lock));
   6931 
   6932 	/*
   6933 	 * Initialize the receive descriptor and receive job
   6934 	 * descriptor rings.
   6935 	 */
   6936 	if (sc->sc_type < WM_T_82543) {
   6937 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6938 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6939 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6940 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6941 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6942 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6943 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6944 
   6945 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6946 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6947 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6948 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6949 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6950 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6951 	} else {
   6952 		int qid = wmq->wmq_id;
   6953 
   6954 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6955 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6956 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6957 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6958 
   6959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6960 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6961 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6962 
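			/*
			 * Note (illustrative): SRRCTL expresses the buffer
			 * size in units of 1 << SRRCTL_BSIZEPKT_SHIFT bytes
			 * (1KB when the shift is 10), so an MCLBYTES of
			 * 2048 would program a BSIZEPKT field of 2.
			 */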
    6963 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6964 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6965 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6966 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6967 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6968 			    | RXDCTL_WTHRESH(1));
   6969 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6970 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6971 		} else {
   6972 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6973 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6974 			/* XXX should update with AIM? */
   6975 			CSR_WRITE(sc, WMREG_RDTR,
   6976 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6977 			/* MUST be the same as RDTR */
   6978 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6979 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6980 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6981 		}
   6982 	}
   6983 }
   6984 
   6985 static int
   6986 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6987 {
   6988 	struct wm_rxsoft *rxs;
   6989 	int error, i;
   6990 
   6991 	KASSERT(mutex_owned(rxq->rxq_lock));
   6992 
   6993 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6994 		rxs = &rxq->rxq_soft[i];
   6995 		if (rxs->rxs_mbuf == NULL) {
   6996 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6997 				log(LOG_ERR, "%s: unable to allocate or map "
   6998 				    "rx buffer %d, error = %d\n",
   6999 				    device_xname(sc->sc_dev), i, error);
   7000 				/*
   7001 				 * XXX Should attempt to run with fewer receive
   7002 				 * XXX buffers instead of just failing.
   7003 				 */
   7004 				wm_rxdrain(rxq);
   7005 				return ENOMEM;
   7006 			}
   7007 		} else {
   7008 			/*
   7009 			 * For 82575 and 82576, the RX descriptors must be
   7010 			 * initialized after the setting of RCTL.EN in
    7011 			 * wm_set_filter().
   7012 			 */
   7013 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7014 				wm_init_rxdesc(rxq, i);
   7015 		}
   7016 	}
   7017 	rxq->rxq_ptr = 0;
   7018 	rxq->rxq_discard = 0;
   7019 	WM_RXCHAIN_RESET(rxq);
   7020 
   7021 	return 0;
   7022 }
   7023 
   7024 static int
   7025 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7026     struct wm_rxqueue *rxq)
   7027 {
   7028 
   7029 	KASSERT(mutex_owned(rxq->rxq_lock));
   7030 
   7031 	/*
   7032 	 * Set up some register offsets that are different between
   7033 	 * the i82542 and the i82543 and later chips.
   7034 	 */
   7035 	if (sc->sc_type < WM_T_82543)
   7036 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7037 	else
   7038 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7039 
   7040 	wm_init_rx_regs(sc, wmq, rxq);
   7041 	return wm_init_rx_buffer(sc, rxq);
   7042 }
   7043 
   7044 /*
    7045  * wm_init_txrx_queues:
    7046  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7047  */
   7048 static int
   7049 wm_init_txrx_queues(struct wm_softc *sc)
   7050 {
   7051 	int i, error = 0;
   7052 
   7053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7054 		device_xname(sc->sc_dev), __func__));
   7055 
   7056 	for (i = 0; i < sc->sc_nqueues; i++) {
   7057 		struct wm_queue *wmq = &sc->sc_queue[i];
   7058 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7059 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7060 
   7061 		/*
   7062 		 * TODO
    7063 		 * Currently, a constant value is used instead of AIM.
    7064 		 * Furthermore, the interrupt interval of multiqueue, which
    7065 		 * uses polling mode, is lower than the default value.
    7066 		 * More tuning and AIM support are required.
   7067 		 */
   7068 		if (wm_is_using_multiqueue(sc))
   7069 			wmq->wmq_itr = 50;
   7070 		else
   7071 			wmq->wmq_itr = sc->sc_itr_init;
   7072 		wmq->wmq_set_itr = true;
   7073 
   7074 		mutex_enter(txq->txq_lock);
   7075 		wm_init_tx_queue(sc, wmq, txq);
   7076 		mutex_exit(txq->txq_lock);
   7077 
   7078 		mutex_enter(rxq->rxq_lock);
   7079 		error = wm_init_rx_queue(sc, wmq, rxq);
   7080 		mutex_exit(rxq->rxq_lock);
   7081 		if (error)
   7082 			break;
   7083 	}
   7084 
   7085 	return error;
   7086 }
   7087 
   7088 /*
   7089  * wm_tx_offload:
   7090  *
   7091  *	Set up TCP/IP checksumming parameters for the
   7092  *	specified packet.
   7093  */
   7094 static int
   7095 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7096     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7097 {
   7098 	struct mbuf *m0 = txs->txs_mbuf;
   7099 	struct livengood_tcpip_ctxdesc *t;
   7100 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7101 	uint32_t ipcse;
   7102 	struct ether_header *eh;
   7103 	int offset, iphl;
   7104 	uint8_t fields;
   7105 
   7106 	/*
   7107 	 * XXX It would be nice if the mbuf pkthdr had offset
   7108 	 * fields for the protocol headers.
   7109 	 */
   7110 
   7111 	eh = mtod(m0, struct ether_header *);
   7112 	switch (htons(eh->ether_type)) {
   7113 	case ETHERTYPE_IP:
   7114 	case ETHERTYPE_IPV6:
   7115 		offset = ETHER_HDR_LEN;
   7116 		break;
   7117 
   7118 	case ETHERTYPE_VLAN:
   7119 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7120 		break;
   7121 
   7122 	default:
   7123 		/*
   7124 		 * Don't support this protocol or encapsulation.
   7125 		 */
   7126 		*fieldsp = 0;
   7127 		*cmdp = 0;
   7128 		return 0;
   7129 	}
   7130 
   7131 	if ((m0->m_pkthdr.csum_flags &
   7132 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7133 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7134 	} else
   7135 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7136 
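	/*
	 * IPCSE is the inclusive offset of the last byte covered by the
	 * IP header checksum; e.g. (illustrative) a 14-byte Ethernet
	 * header and a 20-byte IP header give ipcse = 33.
	 */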
   7137 	ipcse = offset + iphl - 1;
   7138 
   7139 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7140 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7141 	seg = 0;
   7142 	fields = 0;
   7143 
   7144 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7145 		int hlen = offset + iphl;
   7146 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7147 
   7148 		if (__predict_false(m0->m_len <
   7149 				    (hlen + sizeof(struct tcphdr)))) {
   7150 			/*
   7151 			 * TCP/IP headers are not in the first mbuf; we need
   7152 			 * to do this the slow and painful way. Let's just
   7153 			 * hope this doesn't happen very often.
   7154 			 */
   7155 			struct tcphdr th;
   7156 
   7157 			WM_Q_EVCNT_INCR(txq, tsopain);
   7158 
   7159 			m_copydata(m0, hlen, sizeof(th), &th);
   7160 			if (v4) {
   7161 				struct ip ip;
   7162 
   7163 				m_copydata(m0, offset, sizeof(ip), &ip);
   7164 				ip.ip_len = 0;
   7165 				m_copyback(m0,
   7166 				    offset + offsetof(struct ip, ip_len),
   7167 				    sizeof(ip.ip_len), &ip.ip_len);
   7168 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7169 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7170 			} else {
   7171 				struct ip6_hdr ip6;
   7172 
   7173 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7174 				ip6.ip6_plen = 0;
   7175 				m_copyback(m0,
   7176 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7177 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7178 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7179 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7180 			}
   7181 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7182 			    sizeof(th.th_sum), &th.th_sum);
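			/*
			 * th_sum is now seeded with the pseudo-header
			 * checksum (with the length field omitted), which
			 * the controller completes for each segment it
			 * generates.
			 */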
   7183 
   7184 			hlen += th.th_off << 2;
   7185 		} else {
   7186 			/*
   7187 			 * TCP/IP headers are in the first mbuf; we can do
   7188 			 * this the easy way.
   7189 			 */
   7190 			struct tcphdr *th;
   7191 
   7192 			if (v4) {
   7193 				struct ip *ip =
   7194 				    (void *)(mtod(m0, char *) + offset);
   7195 				th = (void *)(mtod(m0, char *) + hlen);
   7196 
   7197 				ip->ip_len = 0;
   7198 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7199 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7200 			} else {
   7201 				struct ip6_hdr *ip6 =
   7202 				    (void *)(mtod(m0, char *) + offset);
   7203 				th = (void *)(mtod(m0, char *) + hlen);
   7204 
   7205 				ip6->ip6_plen = 0;
   7206 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7207 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7208 			}
   7209 			hlen += th->th_off << 2;
   7210 		}
   7211 
   7212 		if (v4) {
   7213 			WM_Q_EVCNT_INCR(txq, tso);
   7214 			cmdlen |= WTX_TCPIP_CMD_IP;
   7215 		} else {
   7216 			WM_Q_EVCNT_INCR(txq, tso6);
   7217 			ipcse = 0;
   7218 		}
   7219 		cmd |= WTX_TCPIP_CMD_TSE;
   7220 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7221 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7222 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7223 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7224 	}
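	/*
	 * With TSE set, the hardware replicates the hlen-byte header
	 * for each MSS-sized chunk of the remaining payload.  An
	 * illustrative example: a 54-byte header, an 8760-byte TCP
	 * payload and an MSS of 1460 yield 6 frames on the wire.
	 */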
   7225 
   7226 	/*
   7227 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7228 	 * offload feature, if we load the context descriptor, we
   7229 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7230 	 */
   7231 
   7232 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7233 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7234 	    WTX_TCPIP_IPCSE(ipcse);
   7235 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7236 		WM_Q_EVCNT_INCR(txq, ipsum);
   7237 		fields |= WTX_IXSM;
   7238 	}
   7239 
   7240 	offset += iphl;
   7241 
   7242 	if (m0->m_pkthdr.csum_flags &
   7243 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7244 		WM_Q_EVCNT_INCR(txq, tusum);
   7245 		fields |= WTX_TXSM;
   7246 		tucs = WTX_TCPIP_TUCSS(offset) |
   7247 		    WTX_TCPIP_TUCSO(offset +
   7248 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7249 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7250 	} else if ((m0->m_pkthdr.csum_flags &
   7251 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7252 		WM_Q_EVCNT_INCR(txq, tusum6);
   7253 		fields |= WTX_TXSM;
   7254 		tucs = WTX_TCPIP_TUCSS(offset) |
   7255 		    WTX_TCPIP_TUCSO(offset +
   7256 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7257 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7258 	} else {
   7259 		/* Just initialize it to a valid TCP context. */
   7260 		tucs = WTX_TCPIP_TUCSS(offset) |
   7261 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7262 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7263 	}
   7264 
   7265 	/*
    7266 	 * We don't have to write a context descriptor for every packet,
    7267 	 * except on the 82574: the 82574 requires a context descriptor
    7268 	 * for every packet when two descriptor queues are used.
    7269 	 * Writing a context descriptor for every packet adds overhead,
    7270 	 * but it does not cause problems.
   7271 	 */
   7272 	/* Fill in the context descriptor. */
   7273 	t = (struct livengood_tcpip_ctxdesc *)
   7274 	    &txq->txq_descs[txq->txq_next];
   7275 	t->tcpip_ipcs = htole32(ipcs);
   7276 	t->tcpip_tucs = htole32(tucs);
   7277 	t->tcpip_cmdlen = htole32(cmdlen);
   7278 	t->tcpip_seg = htole32(seg);
   7279 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7280 
   7281 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7282 	txs->txs_ndesc++;
   7283 
   7284 	*cmdp = cmd;
   7285 	*fieldsp = fields;
   7286 
   7287 	return 0;
   7288 }
   7289 
   7290 static inline int
   7291 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7292 {
   7293 	struct wm_softc *sc = ifp->if_softc;
   7294 	u_int cpuid = cpu_index(curcpu());
   7295 
   7296 	/*
    7297 	 * Currently, a simple distribution strategy.
    7298 	 * TODO:
    7299 	 * Distribute by flow ID (the RSS hash value).
   7300 	 */
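	/*
	 * An illustrative example with assumed values: with ncpu = 8,
	 * sc_affinity_offset = 2 and sc_nqueues = 4, CPU index 3 maps
	 * to ((3 + 8 - 2) % 8) % 4 = queue 1.
	 */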
   7301 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7302 }
   7303 
   7304 /*
   7305  * wm_start:		[ifnet interface function]
   7306  *
   7307  *	Start packet transmission on the interface.
   7308  */
   7309 static void
   7310 wm_start(struct ifnet *ifp)
   7311 {
   7312 	struct wm_softc *sc = ifp->if_softc;
   7313 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7314 
   7315 #ifdef WM_MPSAFE
   7316 	KASSERT(if_is_mpsafe(ifp));
   7317 #endif
   7318 	/*
   7319 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7320 	 */
   7321 
   7322 	mutex_enter(txq->txq_lock);
   7323 	if (!txq->txq_stopping)
   7324 		wm_start_locked(ifp);
   7325 	mutex_exit(txq->txq_lock);
   7326 }
   7327 
   7328 static void
   7329 wm_start_locked(struct ifnet *ifp)
   7330 {
   7331 	struct wm_softc *sc = ifp->if_softc;
   7332 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7333 
   7334 	wm_send_common_locked(ifp, txq, false);
   7335 }
   7336 
   7337 static int
   7338 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7339 {
   7340 	int qid;
   7341 	struct wm_softc *sc = ifp->if_softc;
   7342 	struct wm_txqueue *txq;
   7343 
   7344 	qid = wm_select_txqueue(ifp, m);
   7345 	txq = &sc->sc_queue[qid].wmq_txq;
   7346 
   7347 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7348 		m_freem(m);
   7349 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7350 		return ENOBUFS;
   7351 	}
   7352 
   7353 	/*
   7354 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7355 	 */
   7356 	ifp->if_obytes += m->m_pkthdr.len;
   7357 	if (m->m_flags & M_MCAST)
   7358 		ifp->if_omcasts++;
   7359 
   7360 	if (mutex_tryenter(txq->txq_lock)) {
   7361 		if (!txq->txq_stopping)
   7362 			wm_transmit_locked(ifp, txq);
   7363 		mutex_exit(txq->txq_lock);
   7364 	}
   7365 
   7366 	return 0;
   7367 }
   7368 
   7369 static void
   7370 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7371 {
   7372 
   7373 	wm_send_common_locked(ifp, txq, true);
   7374 }
   7375 
   7376 static void
   7377 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7378     bool is_transmit)
   7379 {
   7380 	struct wm_softc *sc = ifp->if_softc;
   7381 	struct mbuf *m0;
   7382 	struct wm_txsoft *txs;
   7383 	bus_dmamap_t dmamap;
   7384 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7385 	bus_addr_t curaddr;
   7386 	bus_size_t seglen, curlen;
   7387 	uint32_t cksumcmd;
   7388 	uint8_t cksumfields;
   7389 	bool remap = true;
   7390 
   7391 	KASSERT(mutex_owned(txq->txq_lock));
   7392 
   7393 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7394 		return;
   7395 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7396 		return;
   7397 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7398 		return;
   7399 
   7400 	/* Remember the previous number of free descriptors. */
   7401 	ofree = txq->txq_free;
   7402 
   7403 	/*
   7404 	 * Loop through the send queue, setting up transmit descriptors
   7405 	 * until we drain the queue, or use up all available transmit
   7406 	 * descriptors.
   7407 	 */
   7408 	for (;;) {
   7409 		m0 = NULL;
   7410 
   7411 		/* Get a work queue entry. */
   7412 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7413 			wm_txeof(txq, UINT_MAX);
   7414 			if (txq->txq_sfree == 0) {
   7415 				DPRINTF(WM_DEBUG_TX,
   7416 				    ("%s: TX: no free job descriptors\n",
   7417 					device_xname(sc->sc_dev)));
   7418 				WM_Q_EVCNT_INCR(txq, txsstall);
   7419 				break;
   7420 			}
   7421 		}
   7422 
   7423 		/* Grab a packet off the queue. */
   7424 		if (is_transmit)
   7425 			m0 = pcq_get(txq->txq_interq);
   7426 		else
   7427 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7428 		if (m0 == NULL)
   7429 			break;
   7430 
   7431 		DPRINTF(WM_DEBUG_TX,
   7432 		    ("%s: TX: have packet to transmit: %p\n",
   7433 			device_xname(sc->sc_dev), m0));
   7434 
   7435 		txs = &txq->txq_soft[txq->txq_snext];
   7436 		dmamap = txs->txs_dmamap;
   7437 
   7438 		use_tso = (m0->m_pkthdr.csum_flags &
   7439 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7440 
   7441 		/*
   7442 		 * So says the Linux driver:
   7443 		 * The controller does a simple calculation to make sure
   7444 		 * there is enough room in the FIFO before initiating the
   7445 		 * DMA for each buffer. The calc is:
   7446 		 *	4 = ceil(buffer len / MSS)
   7447 		 * To make sure we don't overrun the FIFO, adjust the max
   7448 		 * buffer len if the MSS drops.
   7449 		 */
   7450 		dmamap->dm_maxsegsz =
   7451 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7452 		    ? m0->m_pkthdr.segsz << 2
   7453 		    : WTX_MAX_LEN;
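		/*
		 * For example (illustrative): an MSS of 1460 caps each
		 * DMA segment at 1460 << 2 = 5840 bytes, assuming that
		 * is below WTX_MAX_LEN; otherwise WTX_MAX_LEN applies.
		 */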
   7454 
   7455 		/*
   7456 		 * Load the DMA map.  If this fails, the packet either
   7457 		 * didn't fit in the allotted number of segments, or we
   7458 		 * were short on resources.  For the too-many-segments
   7459 		 * case, we simply report an error and drop the packet,
   7460 		 * since we can't sanely copy a jumbo packet to a single
   7461 		 * buffer.
   7462 		 */
   7463 retry:
   7464 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7465 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7466 		if (__predict_false(error)) {
   7467 			if (error == EFBIG) {
   7468 				if (remap == true) {
   7469 					struct mbuf *m;
   7470 
   7471 					remap = false;
   7472 					m = m_defrag(m0, M_NOWAIT);
   7473 					if (m != NULL) {
   7474 						WM_Q_EVCNT_INCR(txq, defrag);
   7475 						m0 = m;
   7476 						goto retry;
   7477 					}
   7478 				}
   7479 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7480 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7481 				    "DMA segments, dropping...\n",
   7482 				    device_xname(sc->sc_dev));
   7483 				wm_dump_mbuf_chain(sc, m0);
   7484 				m_freem(m0);
   7485 				continue;
   7486 			}
   7487 			/*  Short on resources, just stop for now. */
   7488 			DPRINTF(WM_DEBUG_TX,
   7489 			    ("%s: TX: dmamap load failed: %d\n",
   7490 				device_xname(sc->sc_dev), error));
   7491 			break;
   7492 		}
   7493 
   7494 		segs_needed = dmamap->dm_nsegs;
   7495 		if (use_tso) {
   7496 			/* For sentinel descriptor; see below. */
   7497 			segs_needed++;
   7498 		}
   7499 
   7500 		/*
   7501 		 * Ensure we have enough descriptors free to describe
   7502 		 * the packet. Note, we always reserve one descriptor
   7503 		 * at the end of the ring due to the semantics of the
   7504 		 * TDT register, plus one more in the event we need
   7505 		 * to load offload context.
   7506 		 */
   7507 		if (segs_needed > txq->txq_free - 2) {
   7508 			/*
   7509 			 * Not enough free descriptors to transmit this
   7510 			 * packet.  We haven't committed anything yet,
   7511 			 * so just unload the DMA map, put the packet
   7512 			 * pack on the queue, and punt. Notify the upper
   7513 			 * layer that there are no more slots left.
   7514 			 */
   7515 			DPRINTF(WM_DEBUG_TX,
   7516 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7517 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7518 				segs_needed, txq->txq_free - 1));
   7519 			if (!is_transmit)
   7520 				ifp->if_flags |= IFF_OACTIVE;
   7521 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7522 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7523 			WM_Q_EVCNT_INCR(txq, txdstall);
   7524 			break;
   7525 		}
   7526 
   7527 		/*
   7528 		 * Check for 82547 Tx FIFO bug. We need to do this
   7529 		 * once we know we can transmit the packet, since we
   7530 		 * do some internal FIFO space accounting here.
   7531 		 */
   7532 		if (sc->sc_type == WM_T_82547 &&
   7533 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7534 			DPRINTF(WM_DEBUG_TX,
   7535 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7536 				device_xname(sc->sc_dev)));
   7537 			if (!is_transmit)
   7538 				ifp->if_flags |= IFF_OACTIVE;
   7539 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7540 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7541 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7542 			break;
   7543 		}
   7544 
   7545 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7546 
   7547 		DPRINTF(WM_DEBUG_TX,
   7548 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7549 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7550 
   7551 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7552 
   7553 		/*
   7554 		 * Store a pointer to the packet so that we can free it
   7555 		 * later.
   7556 		 *
   7557 		 * Initially, we consider the number of descriptors the
    7558 		 * packet uses to be the number of DMA segments.  This may be
   7559 		 * incremented by 1 if we do checksum offload (a descriptor
   7560 		 * is used to set the checksum context).
   7561 		 */
   7562 		txs->txs_mbuf = m0;
   7563 		txs->txs_firstdesc = txq->txq_next;
   7564 		txs->txs_ndesc = segs_needed;
   7565 
   7566 		/* Set up offload parameters for this packet. */
   7567 		if (m0->m_pkthdr.csum_flags &
   7568 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7569 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7570 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7571 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7572 					  &cksumfields) != 0) {
   7573 				/* Error message already displayed. */
   7574 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7575 				continue;
   7576 			}
   7577 		} else {
   7578 			cksumcmd = 0;
   7579 			cksumfields = 0;
   7580 		}
   7581 
   7582 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7583 
   7584 		/* Sync the DMA map. */
   7585 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7586 		    BUS_DMASYNC_PREWRITE);
   7587 
   7588 		/* Initialize the transmit descriptor. */
   7589 		for (nexttx = txq->txq_next, seg = 0;
   7590 		     seg < dmamap->dm_nsegs; seg++) {
   7591 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7592 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7593 			     seglen != 0;
   7594 			     curaddr += curlen, seglen -= curlen,
   7595 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7596 				curlen = seglen;
   7597 
   7598 				/*
   7599 				 * So says the Linux driver:
   7600 				 * Work around for premature descriptor
   7601 				 * write-backs in TSO mode.  Append a
   7602 				 * 4-byte sentinel descriptor.
   7603 				 */
   7604 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7605 				    curlen > 8)
   7606 					curlen -= 4;
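				/*
				 * The 4 bytes trimmed here are picked
				 * up on the next loop iteration as the
				 * short sentinel descriptor counted in
				 * segs_needed above.
				 */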
   7607 
   7608 				wm_set_dma_addr(
   7609 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7610 				txq->txq_descs[nexttx].wtx_cmdlen
   7611 				    = htole32(cksumcmd | curlen);
   7612 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7613 				    = 0;
   7614 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7615 				    = cksumfields;
    7616 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7617 				lasttx = nexttx;
   7618 
   7619 				DPRINTF(WM_DEBUG_TX,
   7620 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7621 					"len %#04zx\n",
   7622 					device_xname(sc->sc_dev), nexttx,
   7623 					(uint64_t)curaddr, curlen));
   7624 			}
   7625 		}
   7626 
   7627 		KASSERT(lasttx != -1);
   7628 
   7629 		/*
   7630 		 * Set up the command byte on the last descriptor of
   7631 		 * the packet. If we're in the interrupt delay window,
   7632 		 * delay the interrupt.
   7633 		 */
   7634 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7635 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7636 
   7637 		/*
   7638 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7639 		 * up the descriptor to encapsulate the packet for us.
   7640 		 *
   7641 		 * This is only valid on the last descriptor of the packet.
   7642 		 */
   7643 		if (vlan_has_tag(m0)) {
   7644 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7645 			    htole32(WTX_CMD_VLE);
   7646 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7647 			    = htole16(vlan_get_tag(m0));
   7648 		}
   7649 
   7650 		txs->txs_lastdesc = lasttx;
   7651 
   7652 		DPRINTF(WM_DEBUG_TX,
   7653 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7654 			device_xname(sc->sc_dev),
   7655 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7656 
   7657 		/* Sync the descriptors we're using. */
   7658 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7659 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7660 
   7661 		/* Give the packet to the chip. */
   7662 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7663 
   7664 		DPRINTF(WM_DEBUG_TX,
   7665 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7666 
   7667 		DPRINTF(WM_DEBUG_TX,
   7668 		    ("%s: TX: finished transmitting packet, job %d\n",
   7669 			device_xname(sc->sc_dev), txq->txq_snext));
   7670 
   7671 		/* Advance the tx pointer. */
   7672 		txq->txq_free -= txs->txs_ndesc;
   7673 		txq->txq_next = nexttx;
   7674 
   7675 		txq->txq_sfree--;
   7676 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7677 
   7678 		/* Pass the packet to any BPF listeners. */
   7679 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7680 	}
   7681 
   7682 	if (m0 != NULL) {
   7683 		if (!is_transmit)
   7684 			ifp->if_flags |= IFF_OACTIVE;
   7685 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7686 		WM_Q_EVCNT_INCR(txq, descdrop);
   7687 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7688 			__func__));
   7689 		m_freem(m0);
   7690 	}
   7691 
   7692 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7693 		/* No more slots; notify upper layer. */
   7694 		if (!is_transmit)
   7695 			ifp->if_flags |= IFF_OACTIVE;
   7696 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7697 	}
   7698 
   7699 	if (txq->txq_free != ofree) {
   7700 		/* Set a watchdog timer in case the chip flakes out. */
   7701 		txq->txq_lastsent = time_uptime;
   7702 		txq->txq_sending = true;
   7703 	}
   7704 }
   7705 
   7706 /*
   7707  * wm_nq_tx_offload:
   7708  *
   7709  *	Set up TCP/IP checksumming parameters for the
   7710  *	specified packet, for NEWQUEUE devices
   7711  */
   7712 static int
   7713 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7714     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7715 {
   7716 	struct mbuf *m0 = txs->txs_mbuf;
   7717 	uint32_t vl_len, mssidx, cmdc;
   7718 	struct ether_header *eh;
   7719 	int offset, iphl;
   7720 
   7721 	/*
   7722 	 * XXX It would be nice if the mbuf pkthdr had offset
   7723 	 * fields for the protocol headers.
   7724 	 */
   7725 	*cmdlenp = 0;
   7726 	*fieldsp = 0;
   7727 
   7728 	eh = mtod(m0, struct ether_header *);
   7729 	switch (htons(eh->ether_type)) {
   7730 	case ETHERTYPE_IP:
   7731 	case ETHERTYPE_IPV6:
   7732 		offset = ETHER_HDR_LEN;
   7733 		break;
   7734 
   7735 	case ETHERTYPE_VLAN:
   7736 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7737 		break;
   7738 
   7739 	default:
   7740 		/* Don't support this protocol or encapsulation. */
   7741 		*do_csum = false;
   7742 		return 0;
   7743 	}
   7744 	*do_csum = true;
   7745 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7746 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7747 
   7748 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7749 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7750 
   7751 	if ((m0->m_pkthdr.csum_flags &
   7752 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7753 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7754 	} else {
   7755 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7756 	}
   7757 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7758 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
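	/*
	 * vl_len now packs the MAC header length and the IP header
	 * length (and, below, any VLAN tag) into the NQTXC_VLLEN
	 * layout word used by the context descriptor.
	 */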
   7759 
   7760 	if (vlan_has_tag(m0)) {
   7761 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7762 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7763 		*cmdlenp |= NQTX_CMD_VLE;
   7764 	}
   7765 
   7766 	mssidx = 0;
   7767 
   7768 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7769 		int hlen = offset + iphl;
   7770 		int tcp_hlen;
   7771 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7772 
   7773 		if (__predict_false(m0->m_len <
   7774 				    (hlen + sizeof(struct tcphdr)))) {
   7775 			/*
   7776 			 * TCP/IP headers are not in the first mbuf; we need
   7777 			 * to do this the slow and painful way. Let's just
   7778 			 * hope this doesn't happen very often.
   7779 			 */
   7780 			struct tcphdr th;
   7781 
   7782 			WM_Q_EVCNT_INCR(txq, tsopain);
   7783 
   7784 			m_copydata(m0, hlen, sizeof(th), &th);
   7785 			if (v4) {
   7786 				struct ip ip;
   7787 
   7788 				m_copydata(m0, offset, sizeof(ip), &ip);
   7789 				ip.ip_len = 0;
   7790 				m_copyback(m0,
   7791 				    offset + offsetof(struct ip, ip_len),
   7792 				    sizeof(ip.ip_len), &ip.ip_len);
   7793 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7794 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7795 			} else {
   7796 				struct ip6_hdr ip6;
   7797 
   7798 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7799 				ip6.ip6_plen = 0;
   7800 				m_copyback(m0,
   7801 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7802 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7803 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7804 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7805 			}
   7806 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7807 			    sizeof(th.th_sum), &th.th_sum);
   7808 
   7809 			tcp_hlen = th.th_off << 2;
   7810 		} else {
   7811 			/*
   7812 			 * TCP/IP headers are in the first mbuf; we can do
   7813 			 * this the easy way.
   7814 			 */
   7815 			struct tcphdr *th;
   7816 
   7817 			if (v4) {
   7818 				struct ip *ip =
   7819 				    (void *)(mtod(m0, char *) + offset);
   7820 				th = (void *)(mtod(m0, char *) + hlen);
   7821 
   7822 				ip->ip_len = 0;
   7823 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7824 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7825 			} else {
   7826 				struct ip6_hdr *ip6 =
   7827 				    (void *)(mtod(m0, char *) + offset);
   7828 				th = (void *)(mtod(m0, char *) + hlen);
   7829 
   7830 				ip6->ip6_plen = 0;
   7831 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7832 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7833 			}
   7834 			tcp_hlen = th->th_off << 2;
   7835 		}
   7836 		hlen += tcp_hlen;
   7837 		*cmdlenp |= NQTX_CMD_TSE;
   7838 
   7839 		if (v4) {
   7840 			WM_Q_EVCNT_INCR(txq, tso);
   7841 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7842 		} else {
   7843 			WM_Q_EVCNT_INCR(txq, tso6);
   7844 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7845 		}
   7846 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7847 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7848 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7849 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7850 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7851 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7852 	} else {
   7853 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7854 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7855 	}
   7856 
   7857 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7858 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7859 		cmdc |= NQTXC_CMD_IP4;
   7860 	}
   7861 
   7862 	if (m0->m_pkthdr.csum_flags &
   7863 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7864 		WM_Q_EVCNT_INCR(txq, tusum);
   7865 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7866 			cmdc |= NQTXC_CMD_TCP;
   7867 		else
   7868 			cmdc |= NQTXC_CMD_UDP;
   7869 
   7870 		cmdc |= NQTXC_CMD_IP4;
   7871 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7872 	}
   7873 	if (m0->m_pkthdr.csum_flags &
   7874 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7875 		WM_Q_EVCNT_INCR(txq, tusum6);
   7876 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7877 			cmdc |= NQTXC_CMD_TCP;
   7878 		else
   7879 			cmdc |= NQTXC_CMD_UDP;
   7880 
   7881 		cmdc |= NQTXC_CMD_IP6;
   7882 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7883 	}
   7884 
    7885 	/*
    7886 	 * We don't have to write a context descriptor for every packet on
    7887 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    7888 	 * I210 and I211. Writing it once per Tx queue is enough for these
    7889 	 * controllers.
    7890 	 * Writing a context descriptor for every packet adds overhead,
    7891 	 * but it does not cause problems.
    7892 	 */
   7893 	/* Fill in the context descriptor. */
   7894 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7895 	    htole32(vl_len);
   7896 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7897 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7898 	    htole32(cmdc);
   7899 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7900 	    htole32(mssidx);
   7901 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7902 	DPRINTF(WM_DEBUG_TX,
   7903 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7904 		txq->txq_next, 0, vl_len));
   7905 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7906 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7907 	txs->txs_ndesc++;
   7908 	return 0;
   7909 }
   7910 
   7911 /*
   7912  * wm_nq_start:		[ifnet interface function]
   7913  *
   7914  *	Start packet transmission on the interface for NEWQUEUE devices
   7915  */
   7916 static void
   7917 wm_nq_start(struct ifnet *ifp)
   7918 {
   7919 	struct wm_softc *sc = ifp->if_softc;
   7920 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7921 
   7922 #ifdef WM_MPSAFE
   7923 	KASSERT(if_is_mpsafe(ifp));
   7924 #endif
   7925 	/*
   7926 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7927 	 */
   7928 
   7929 	mutex_enter(txq->txq_lock);
   7930 	if (!txq->txq_stopping)
   7931 		wm_nq_start_locked(ifp);
   7932 	mutex_exit(txq->txq_lock);
   7933 }
   7934 
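/*
 * wm_nq_start_locked:
 *
 *	Start packet transmission on queue 0 of the interface for
 *	NEWQUEUE devices. The caller must hold txq_lock.
 */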
   7935 static void
   7936 wm_nq_start_locked(struct ifnet *ifp)
   7937 {
   7938 	struct wm_softc *sc = ifp->if_softc;
   7939 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7940 
   7941 	wm_nq_send_common_locked(ifp, txq, false);
   7942 }
   7943 
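/*
 * wm_nq_transmit:	[ifnet interface function]
 *
 *	Transmit entry point for NEWQUEUE devices. Select a Tx queue,
 *	enqueue the mbuf, and start transmission if the queue lock can be
 *	taken; otherwise the deferred start dequeues the packet later.
 */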
   7944 static int
   7945 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7946 {
   7947 	int qid;
   7948 	struct wm_softc *sc = ifp->if_softc;
   7949 	struct wm_txqueue *txq;
   7950 
   7951 	qid = wm_select_txqueue(ifp, m);
   7952 	txq = &sc->sc_queue[qid].wmq_txq;
   7953 
   7954 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7955 		m_freem(m);
   7956 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7957 		return ENOBUFS;
   7958 	}
   7959 
   7960 	/*
   7961 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7962 	 */
   7963 	ifp->if_obytes += m->m_pkthdr.len;
   7964 	if (m->m_flags & M_MCAST)
   7965 		ifp->if_omcasts++;
   7966 
    7967 	/*
    7968 	 * There are two situations in which this mutex_tryenter() can
    7969 	 * fail at run time:
    7970 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7971 	 *     (2) contention with the deferred if_start softint
    7972 	 *         (wm_handle_queue())
    7973 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7974 	 * dequeued by wm_deferred_start_locked(), so it does not get
    7975 	 * stuck. In case (2), the last packet is likewise dequeued by
    7976 	 * wm_deferred_start_locked(), so it does not get stuck either.
    7977 	 */
   7978 	if (mutex_tryenter(txq->txq_lock)) {
   7979 		if (!txq->txq_stopping)
   7980 			wm_nq_transmit_locked(ifp, txq);
   7981 		mutex_exit(txq->txq_lock);
   7982 	}
   7983 
   7984 	return 0;
   7985 }
   7986 
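/*
 * wm_nq_transmit_locked:
 *
 *	Like wm_nq_transmit(), but for a single queue whose txq_lock is
 *	already held.
 */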
   7987 static void
   7988 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7989 {
   7990 
   7991 	wm_nq_send_common_locked(ifp, txq, true);
   7992 }
   7993 
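/*
 * wm_nq_send_common_locked:
 *
 *	Common transmit path for wm_nq_start_locked() (is_transmit false;
 *	drains ifp->if_snd) and wm_nq_transmit_locked() (is_transmit true;
 *	drains txq->txq_interq). The caller must hold txq_lock.
 */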
   7994 static void
   7995 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7996     bool is_transmit)
   7997 {
   7998 	struct wm_softc *sc = ifp->if_softc;
   7999 	struct mbuf *m0;
   8000 	struct wm_txsoft *txs;
   8001 	bus_dmamap_t dmamap;
   8002 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8003 	bool do_csum, sent;
   8004 	bool remap = true;
   8005 
   8006 	KASSERT(mutex_owned(txq->txq_lock));
   8007 
   8008 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8009 		return;
   8010 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8011 		return;
   8012 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8013 		return;
   8014 
   8015 	sent = false;
   8016 
   8017 	/*
   8018 	 * Loop through the send queue, setting up transmit descriptors
   8019 	 * until we drain the queue, or use up all available transmit
   8020 	 * descriptors.
   8021 	 */
   8022 	for (;;) {
   8023 		m0 = NULL;
   8024 
   8025 		/* Get a work queue entry. */
   8026 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8027 			wm_txeof(txq, UINT_MAX);
   8028 			if (txq->txq_sfree == 0) {
   8029 				DPRINTF(WM_DEBUG_TX,
   8030 				    ("%s: TX: no free job descriptors\n",
   8031 					device_xname(sc->sc_dev)));
   8032 				WM_Q_EVCNT_INCR(txq, txsstall);
   8033 				break;
   8034 			}
   8035 		}
   8036 
   8037 		/* Grab a packet off the queue. */
   8038 		if (is_transmit)
   8039 			m0 = pcq_get(txq->txq_interq);
   8040 		else
   8041 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8042 		if (m0 == NULL)
   8043 			break;
   8044 
   8045 		DPRINTF(WM_DEBUG_TX,
   8046 		    ("%s: TX: have packet to transmit: %p\n",
   8047 		    device_xname(sc->sc_dev), m0));
   8048 
   8049 		txs = &txq->txq_soft[txq->txq_snext];
   8050 		dmamap = txs->txs_dmamap;
   8051 
   8052 		/*
   8053 		 * Load the DMA map.  If this fails, the packet either
   8054 		 * didn't fit in the allotted number of segments, or we
   8055 		 * were short on resources.  For the too-many-segments
   8056 		 * case, we simply report an error and drop the packet,
   8057 		 * since we can't sanely copy a jumbo packet to a single
   8058 		 * buffer.
   8059 		 */
   8060 retry:
   8061 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8062 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8063 		if (__predict_false(error)) {
   8064 			if (error == EFBIG) {
   8065 				if (remap == true) {
   8066 					struct mbuf *m;
   8067 
   8068 					remap = false;
   8069 					m = m_defrag(m0, M_NOWAIT);
   8070 					if (m != NULL) {
   8071 						WM_Q_EVCNT_INCR(txq, defrag);
   8072 						m0 = m;
   8073 						goto retry;
   8074 					}
   8075 				}
   8076 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8077 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8078 				    "DMA segments, dropping...\n",
   8079 				    device_xname(sc->sc_dev));
   8080 				wm_dump_mbuf_chain(sc, m0);
   8081 				m_freem(m0);
   8082 				continue;
   8083 			}
   8084 			/* Short on resources, just stop for now. */
   8085 			DPRINTF(WM_DEBUG_TX,
   8086 			    ("%s: TX: dmamap load failed: %d\n",
   8087 				device_xname(sc->sc_dev), error));
   8088 			break;
   8089 		}
   8090 
   8091 		segs_needed = dmamap->dm_nsegs;
   8092 
   8093 		/*
   8094 		 * Ensure we have enough descriptors free to describe
   8095 		 * the packet. Note, we always reserve one descriptor
   8096 		 * at the end of the ring due to the semantics of the
   8097 		 * TDT register, plus one more in the event we need
   8098 		 * to load offload context.
   8099 		 */
   8100 		if (segs_needed > txq->txq_free - 2) {
    8101 			/*
    8102 			 * Not enough free descriptors to transmit this
    8103 			 * packet.  We haven't committed anything yet,
    8104 			 * so just unload the DMA map and punt; the
    8105 			 * packet is dropped after the loop. Notify the
    8106 			 * upper layer that there are no more slots left.
    8107 			 */
   8108 			DPRINTF(WM_DEBUG_TX,
   8109 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8110 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8111 				segs_needed, txq->txq_free - 1));
   8112 			if (!is_transmit)
   8113 				ifp->if_flags |= IFF_OACTIVE;
   8114 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8115 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8116 			WM_Q_EVCNT_INCR(txq, txdstall);
   8117 			break;
   8118 		}
   8119 
   8120 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8121 
   8122 		DPRINTF(WM_DEBUG_TX,
   8123 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8124 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8125 
   8126 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8127 
    8128 		/*
    8129 		 * Store a pointer to the packet so that we can free it
    8130 		 * later.
    8131 		 *
    8132 		 * Initially, we consider the number of descriptors the
    8133 		 * packet uses to be the number of DMA segments.  This may
    8134 		 * be incremented by 1 if we do checksum offload (a
    8135 		 * descriptor is used to set the checksum context).
    8136 		 */
   8137 		txs->txs_mbuf = m0;
   8138 		txs->txs_firstdesc = txq->txq_next;
   8139 		txs->txs_ndesc = segs_needed;
   8140 
   8141 		/* Set up offload parameters for this packet. */
   8142 		uint32_t cmdlen, fields, dcmdlen;
   8143 		if (m0->m_pkthdr.csum_flags &
   8144 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8145 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8146 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8147 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8148 			    &do_csum) != 0) {
   8149 				/* Error message already displayed. */
   8150 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8151 				continue;
   8152 			}
   8153 		} else {
   8154 			do_csum = false;
   8155 			cmdlen = 0;
   8156 			fields = 0;
   8157 		}
   8158 
   8159 		/* Sync the DMA map. */
   8160 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8161 		    BUS_DMASYNC_PREWRITE);
   8162 
   8163 		/* Initialize the first transmit descriptor. */
   8164 		nexttx = txq->txq_next;
   8165 		if (!do_csum) {
    8166 			/* Set up a legacy descriptor. */
   8167 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8168 			    dmamap->dm_segs[0].ds_addr);
   8169 			txq->txq_descs[nexttx].wtx_cmdlen =
   8170 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8171 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8172 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8173 			if (vlan_has_tag(m0)) {
   8174 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8175 				    htole32(WTX_CMD_VLE);
   8176 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8177 				    htole16(vlan_get_tag(m0));
   8178 			} else
    8179 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8180 
   8181 			dcmdlen = 0;
   8182 		} else {
    8183 			/* Set up an advanced data descriptor. */
   8184 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8185 			    htole64(dmamap->dm_segs[0].ds_addr);
   8186 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8187 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8188 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8189 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8190 			    htole32(fields);
   8191 			DPRINTF(WM_DEBUG_TX,
   8192 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8193 				device_xname(sc->sc_dev), nexttx,
   8194 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8195 			DPRINTF(WM_DEBUG_TX,
   8196 			    ("\t 0x%08x%08x\n", fields,
   8197 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8198 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8199 		}
   8200 
   8201 		lasttx = nexttx;
   8202 		nexttx = WM_NEXTTX(txq, nexttx);
    8203 		/*
    8204 		 * Fill in the next descriptors. The legacy and advanced
    8205 		 * formats are the same here.
    8206 		 */
   8207 		for (seg = 1; seg < dmamap->dm_nsegs;
   8208 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8209 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8210 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8211 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8212 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8213 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8214 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8215 			lasttx = nexttx;
   8216 
   8217 			DPRINTF(WM_DEBUG_TX,
   8218 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8219 				device_xname(sc->sc_dev), nexttx,
   8220 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8221 				dmamap->dm_segs[seg].ds_len));
   8222 		}
   8223 
   8224 		KASSERT(lasttx != -1);
   8225 
   8226 		/*
   8227 		 * Set up the command byte on the last descriptor of
   8228 		 * the packet. If we're in the interrupt delay window,
   8229 		 * delay the interrupt.
   8230 		 */
   8231 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8232 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8233 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8234 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8235 
   8236 		txs->txs_lastdesc = lasttx;
   8237 
   8238 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8239 		    device_xname(sc->sc_dev),
   8240 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8241 
   8242 		/* Sync the descriptors we're using. */
   8243 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8244 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8245 
   8246 		/* Give the packet to the chip. */
   8247 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8248 		sent = true;
   8249 
   8250 		DPRINTF(WM_DEBUG_TX,
   8251 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8252 
   8253 		DPRINTF(WM_DEBUG_TX,
   8254 		    ("%s: TX: finished transmitting packet, job %d\n",
   8255 			device_xname(sc->sc_dev), txq->txq_snext));
   8256 
   8257 		/* Advance the tx pointer. */
   8258 		txq->txq_free -= txs->txs_ndesc;
   8259 		txq->txq_next = nexttx;
   8260 
   8261 		txq->txq_sfree--;
   8262 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8263 
   8264 		/* Pass the packet to any BPF listeners. */
   8265 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8266 	}
   8267 
   8268 	if (m0 != NULL) {
   8269 		if (!is_transmit)
   8270 			ifp->if_flags |= IFF_OACTIVE;
   8271 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8272 		WM_Q_EVCNT_INCR(txq, descdrop);
   8273 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8274 			__func__));
   8275 		m_freem(m0);
   8276 	}
   8277 
   8278 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8279 		/* No more slots; notify upper layer. */
   8280 		if (!is_transmit)
   8281 			ifp->if_flags |= IFF_OACTIVE;
   8282 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8283 	}
   8284 
   8285 	if (sent) {
   8286 		/* Set a watchdog timer in case the chip flakes out. */
   8287 		txq->txq_lastsent = time_uptime;
   8288 		txq->txq_sending = true;
   8289 	}
   8290 }
   8291 
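/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission on the given queue from softint context,
 *	after wm_txeof() has freed descriptors. The caller must hold
 *	txq_lock.
 */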
   8292 static void
   8293 wm_deferred_start_locked(struct wm_txqueue *txq)
   8294 {
   8295 	struct wm_softc *sc = txq->txq_sc;
   8296 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8297 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8298 	int qid = wmq->wmq_id;
   8299 
   8300 	KASSERT(mutex_owned(txq->txq_lock));
   8301 
   8302 	if (txq->txq_stopping) {
   8303 		mutex_exit(txq->txq_lock);
   8304 		return;
   8305 	}
   8306 
   8307 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8308 		/* XXX needed for ALTQ or single-CPU systems */
   8309 		if (qid == 0)
   8310 			wm_nq_start_locked(ifp);
   8311 		wm_nq_transmit_locked(ifp, txq);
   8312 	} else {
    8313 		/* XXX needed for ALTQ or single-CPU systems */
   8314 		if (qid == 0)
   8315 			wm_start_locked(ifp);
   8316 		wm_transmit_locked(ifp, txq);
   8317 	}
   8318 }
   8319 
   8320 /* Interrupt */
   8321 
   8322 /*
   8323  * wm_txeof:
   8324  *
   8325  *	Helper; handle transmit interrupts.
   8326  */
   8327 static bool
   8328 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8329 {
   8330 	struct wm_softc *sc = txq->txq_sc;
   8331 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8332 	struct wm_txsoft *txs;
   8333 	int count = 0;
   8334 	int i;
   8335 	uint8_t status;
   8336 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8337 	bool more = false;
   8338 
   8339 	KASSERT(mutex_owned(txq->txq_lock));
   8340 
   8341 	if (txq->txq_stopping)
   8342 		return false;
   8343 
   8344 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8345 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8346 	if (wmq->wmq_id == 0)
   8347 		ifp->if_flags &= ~IFF_OACTIVE;
   8348 
   8349 	/*
   8350 	 * Go through the Tx list and free mbufs for those
   8351 	 * frames which have been transmitted.
   8352 	 */
   8353 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8354 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8355 		if (limit-- == 0) {
   8356 			more = true;
   8357 			DPRINTF(WM_DEBUG_TX,
   8358 			    ("%s: TX: loop limited, job %d is not processed\n",
   8359 				device_xname(sc->sc_dev), i));
   8360 			break;
   8361 		}
   8362 
   8363 		txs = &txq->txq_soft[i];
   8364 
   8365 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8366 			device_xname(sc->sc_dev), i));
   8367 
   8368 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8369 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8370 
   8371 		status =
   8372 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8373 		if ((status & WTX_ST_DD) == 0) {
   8374 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8375 			    BUS_DMASYNC_PREREAD);
   8376 			break;
   8377 		}
   8378 
   8379 		count++;
   8380 		DPRINTF(WM_DEBUG_TX,
   8381 		    ("%s: TX: job %d done: descs %d..%d\n",
   8382 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8383 		    txs->txs_lastdesc));
   8384 
   8385 		/*
   8386 		 * XXX We should probably be using the statistics
   8387 		 * XXX registers, but I don't know if they exist
   8388 		 * XXX on chips before the i82544.
   8389 		 */
   8390 
   8391 #ifdef WM_EVENT_COUNTERS
   8392 		if (status & WTX_ST_TU)
   8393 			WM_Q_EVCNT_INCR(txq, underrun);
   8394 #endif /* WM_EVENT_COUNTERS */
   8395 
    8396 		/*
    8397 		 * The documentation for the 82574 and newer says the status
    8398 		 * field has neither the EC (Excessive Collision) bit nor the
    8399 		 * LC (Late Collision) bit (both reserved). See the "PCIe GbE
    8400 		 * Controller Open Source Software Developer's Manual", the
    8401 		 * 82574 datasheet and newer.
    8402 		 *
    8403 		 * XXX I saw the LC bit set on an I218 even though the media
    8404 		 * was full duplex, so the bit might have some other meaning
    8405 		 * (I have no documentation for it).
    8406 		 */
   8406 
   8407 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8408 		    && ((sc->sc_type < WM_T_82574)
   8409 			|| (sc->sc_type == WM_T_80003))) {
   8410 			ifp->if_oerrors++;
   8411 			if (status & WTX_ST_LC)
   8412 				log(LOG_WARNING, "%s: late collision\n",
   8413 				    device_xname(sc->sc_dev));
   8414 			else if (status & WTX_ST_EC) {
   8415 				ifp->if_collisions +=
   8416 				    TX_COLLISION_THRESHOLD + 1;
   8417 				log(LOG_WARNING, "%s: excessive collisions\n",
   8418 				    device_xname(sc->sc_dev));
   8419 			}
   8420 		} else
   8421 			ifp->if_opackets++;
   8422 
   8423 		txq->txq_packets++;
   8424 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8425 
   8426 		txq->txq_free += txs->txs_ndesc;
   8427 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8428 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8429 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8430 		m_freem(txs->txs_mbuf);
   8431 		txs->txs_mbuf = NULL;
   8432 	}
   8433 
   8434 	/* Update the dirty transmit buffer pointer. */
   8435 	txq->txq_sdirty = i;
   8436 	DPRINTF(WM_DEBUG_TX,
   8437 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8438 
   8439 	if (count != 0)
   8440 		rnd_add_uint32(&sc->rnd_source, count);
   8441 
   8442 	/*
   8443 	 * If there are no more pending transmissions, cancel the watchdog
   8444 	 * timer.
   8445 	 */
   8446 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8447 		txq->txq_sending = false;
   8448 
   8449 	return more;
   8450 }
   8451 
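/*
 * Rx descriptor accessors.
 *
 * The 82574 uses extended Rx descriptors, NEWQUEUE (82575 and newer)
 * devices use advanced Rx descriptors, and all other devices use legacy
 * Rx descriptors; the helpers below hide the layout differences.
 */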
   8452 static inline uint32_t
   8453 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8454 {
   8455 	struct wm_softc *sc = rxq->rxq_sc;
   8456 
   8457 	if (sc->sc_type == WM_T_82574)
   8458 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8459 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8460 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8461 	else
   8462 		return rxq->rxq_descs[idx].wrx_status;
   8463 }
   8464 
   8465 static inline uint32_t
   8466 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8467 {
   8468 	struct wm_softc *sc = rxq->rxq_sc;
   8469 
   8470 	if (sc->sc_type == WM_T_82574)
   8471 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8472 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8473 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8474 	else
   8475 		return rxq->rxq_descs[idx].wrx_errors;
   8476 }
   8477 
   8478 static inline uint16_t
   8479 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8480 {
   8481 	struct wm_softc *sc = rxq->rxq_sc;
   8482 
   8483 	if (sc->sc_type == WM_T_82574)
   8484 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8485 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8486 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8487 	else
   8488 		return rxq->rxq_descs[idx].wrx_special;
   8489 }
   8490 
   8491 static inline int
   8492 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8493 {
   8494 	struct wm_softc *sc = rxq->rxq_sc;
   8495 
   8496 	if (sc->sc_type == WM_T_82574)
   8497 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8498 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8499 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8500 	else
   8501 		return rxq->rxq_descs[idx].wrx_len;
   8502 }
   8503 
   8504 #ifdef WM_DEBUG
   8505 static inline uint32_t
   8506 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8507 {
   8508 	struct wm_softc *sc = rxq->rxq_sc;
   8509 
   8510 	if (sc->sc_type == WM_T_82574)
   8511 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8512 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8513 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8514 	else
   8515 		return 0;
   8516 }
   8517 
   8518 static inline uint8_t
   8519 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8520 {
   8521 	struct wm_softc *sc = rxq->rxq_sc;
   8522 
   8523 	if (sc->sc_type == WM_T_82574)
   8524 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8525 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8526 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8527 	else
   8528 		return 0;
   8529 }
   8530 #endif /* WM_DEBUG */
   8531 
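/*
 * wm_rxdesc_is_set_status, wm_rxdesc_is_set_error:
 *
 *	Test a status or error bit, selecting the bit mask that matches
 *	the Rx descriptor format in use.
 */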
   8532 static inline bool
   8533 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8534     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8535 {
   8536 
   8537 	if (sc->sc_type == WM_T_82574)
   8538 		return (status & ext_bit) != 0;
   8539 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8540 		return (status & nq_bit) != 0;
   8541 	else
   8542 		return (status & legacy_bit) != 0;
   8543 }
   8544 
   8545 static inline bool
   8546 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8547     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8548 {
   8549 
   8550 	if (sc->sc_type == WM_T_82574)
   8551 		return (error & ext_bit) != 0;
   8552 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8553 		return (error & nq_bit) != 0;
   8554 	else
   8555 		return (error & legacy_bit) != 0;
   8556 }
   8557 
   8558 static inline bool
   8559 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8560 {
   8561 
   8562 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8563 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8564 		return true;
   8565 	else
   8566 		return false;
   8567 }
   8568 
   8569 static inline bool
   8570 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8571 {
   8572 	struct wm_softc *sc = rxq->rxq_sc;
   8573 
   8574 	/* XXXX missing error bit for newqueue? */
   8575 	if (wm_rxdesc_is_set_error(sc, errors,
   8576 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8577 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8578 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8579 		NQRXC_ERROR_RXE)) {
   8580 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8581 		    EXTRXC_ERROR_SE, 0))
   8582 			log(LOG_WARNING, "%s: symbol error\n",
   8583 			    device_xname(sc->sc_dev));
   8584 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8585 		    EXTRXC_ERROR_SEQ, 0))
   8586 			log(LOG_WARNING, "%s: receive sequence error\n",
   8587 			    device_xname(sc->sc_dev));
   8588 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8589 		    EXTRXC_ERROR_CE, 0))
   8590 			log(LOG_WARNING, "%s: CRC error\n",
   8591 			    device_xname(sc->sc_dev));
   8592 		return true;
   8593 	}
   8594 
   8595 	return false;
   8596 }
   8597 
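/*
 * wm_rxdesc_dd:
 *
 *	Check the descriptor done (DD) bit; if it is clear, resync the
 *	descriptor so that it is re-read on the next pass.
 */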
   8598 static inline bool
   8599 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8600 {
   8601 	struct wm_softc *sc = rxq->rxq_sc;
   8602 
   8603 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8604 		NQRXC_STATUS_DD)) {
   8605 		/* We have processed all of the receive descriptors. */
   8606 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8607 		return false;
   8608 	}
   8609 
   8610 	return true;
   8611 }
   8612 
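/*
 * wm_rxdesc_input_vlantag:
 *
 *	If the descriptor carries a VLAN tag, attach the tag to the mbuf.
 */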
   8613 static inline bool
   8614 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8615     uint16_t vlantag, struct mbuf *m)
   8616 {
   8617 
   8618 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8619 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8620 		vlan_set_tag(m, le16toh(vlantag));
   8621 	}
   8622 
   8623 	return true;
   8624 }
   8625 
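/*
 * wm_rxdesc_ensure_checksum:
 *
 *	Record the hardware checksum-offload results in the mbuf, unless
 *	the hardware indicated that it ignored the checksums.
 */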
   8626 static inline void
   8627 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8628     uint32_t errors, struct mbuf *m)
   8629 {
   8630 	struct wm_softc *sc = rxq->rxq_sc;
   8631 
   8632 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8633 		if (wm_rxdesc_is_set_status(sc, status,
   8634 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8635 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8636 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8637 			if (wm_rxdesc_is_set_error(sc, errors,
   8638 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8639 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8640 		}
   8641 		if (wm_rxdesc_is_set_status(sc, status,
   8642 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8643 			/*
   8644 			 * Note: we don't know if this was TCP or UDP,
   8645 			 * so we just set both bits, and expect the
   8646 			 * upper layers to deal.
   8647 			 */
   8648 			WM_Q_EVCNT_INCR(rxq, tusum);
   8649 			m->m_pkthdr.csum_flags |=
   8650 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8651 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8652 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8653 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8654 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8655 		}
   8656 	}
   8657 }
   8658 
   8659 /*
   8660  * wm_rxeof:
   8661  *
   8662  *	Helper; handle receive interrupts.
   8663  */
   8664 static bool
   8665 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8666 {
   8667 	struct wm_softc *sc = rxq->rxq_sc;
   8668 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8669 	struct wm_rxsoft *rxs;
   8670 	struct mbuf *m;
   8671 	int i, len;
   8672 	int count = 0;
   8673 	uint32_t status, errors;
   8674 	uint16_t vlantag;
   8675 	bool more = false;
   8676 
   8677 	KASSERT(mutex_owned(rxq->rxq_lock));
   8678 
   8679 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8680 		if (limit-- == 0) {
   8681 			rxq->rxq_ptr = i;
   8682 			more = true;
   8683 			DPRINTF(WM_DEBUG_RX,
   8684 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8685 				device_xname(sc->sc_dev), i));
   8686 			break;
   8687 		}
   8688 
   8689 		rxs = &rxq->rxq_soft[i];
   8690 
   8691 		DPRINTF(WM_DEBUG_RX,
   8692 		    ("%s: RX: checking descriptor %d\n",
   8693 			device_xname(sc->sc_dev), i));
   8694 		wm_cdrxsync(rxq, i,
   8695 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8696 
   8697 		status = wm_rxdesc_get_status(rxq, i);
   8698 		errors = wm_rxdesc_get_errors(rxq, i);
   8699 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8700 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8701 #ifdef WM_DEBUG
   8702 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8703 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8704 #endif
   8705 
   8706 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8707 			/*
    8708 			 * Update the receive pointer while holding rxq_lock,
    8709 			 * consistent with the counter increment.
   8710 			 */
   8711 			rxq->rxq_ptr = i;
   8712 			break;
   8713 		}
   8714 
   8715 		count++;
   8716 		if (__predict_false(rxq->rxq_discard)) {
   8717 			DPRINTF(WM_DEBUG_RX,
   8718 			    ("%s: RX: discarding contents of descriptor %d\n",
   8719 				device_xname(sc->sc_dev), i));
   8720 			wm_init_rxdesc(rxq, i);
   8721 			if (wm_rxdesc_is_eop(rxq, status)) {
   8722 				/* Reset our state. */
   8723 				DPRINTF(WM_DEBUG_RX,
   8724 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8725 					device_xname(sc->sc_dev)));
   8726 				rxq->rxq_discard = 0;
   8727 			}
   8728 			continue;
   8729 		}
   8730 
   8731 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8732 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8733 
   8734 		m = rxs->rxs_mbuf;
   8735 
   8736 		/*
   8737 		 * Add a new receive buffer to the ring, unless of
   8738 		 * course the length is zero. Treat the latter as a
   8739 		 * failed mapping.
   8740 		 */
   8741 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8742 			/*
   8743 			 * Failed, throw away what we've done so
   8744 			 * far, and discard the rest of the packet.
   8745 			 */
   8746 			ifp->if_ierrors++;
   8747 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8748 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8749 			wm_init_rxdesc(rxq, i);
   8750 			if (!wm_rxdesc_is_eop(rxq, status))
   8751 				rxq->rxq_discard = 1;
   8752 			if (rxq->rxq_head != NULL)
   8753 				m_freem(rxq->rxq_head);
   8754 			WM_RXCHAIN_RESET(rxq);
   8755 			DPRINTF(WM_DEBUG_RX,
   8756 			    ("%s: RX: Rx buffer allocation failed, "
   8757 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8758 				rxq->rxq_discard ? " (discard)" : ""));
   8759 			continue;
   8760 		}
   8761 
   8762 		m->m_len = len;
   8763 		rxq->rxq_len += len;
   8764 		DPRINTF(WM_DEBUG_RX,
   8765 		    ("%s: RX: buffer at %p len %d\n",
   8766 			device_xname(sc->sc_dev), m->m_data, len));
   8767 
   8768 		/* If this is not the end of the packet, keep looking. */
   8769 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8770 			WM_RXCHAIN_LINK(rxq, m);
   8771 			DPRINTF(WM_DEBUG_RX,
   8772 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8773 				device_xname(sc->sc_dev), rxq->rxq_len));
   8774 			continue;
   8775 		}
   8776 
    8777 		/*
    8778 		 * Okay, we have the entire packet now. The chip is
    8779 		 * configured to include the FCS except on I350/I354 and
    8780 		 * I21[01] (not all chips can be configured to strip it),
    8781 		 * so we need to trim it.
    8782 		 * We may need to adjust the length of the previous mbuf in
    8783 		 * the chain if the current mbuf is too short.
    8784 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8785 		 * register is always set on I350, so we don't trim there.
    8786 		 */
   8787 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8788 		    && (sc->sc_type != WM_T_I210)
   8789 		    && (sc->sc_type != WM_T_I211)) {
   8790 			if (m->m_len < ETHER_CRC_LEN) {
   8791 				rxq->rxq_tail->m_len
   8792 				    -= (ETHER_CRC_LEN - m->m_len);
   8793 				m->m_len = 0;
   8794 			} else
   8795 				m->m_len -= ETHER_CRC_LEN;
   8796 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8797 		} else
   8798 			len = rxq->rxq_len;
   8799 
   8800 		WM_RXCHAIN_LINK(rxq, m);
   8801 
   8802 		*rxq->rxq_tailp = NULL;
   8803 		m = rxq->rxq_head;
   8804 
   8805 		WM_RXCHAIN_RESET(rxq);
   8806 
   8807 		DPRINTF(WM_DEBUG_RX,
   8808 		    ("%s: RX: have entire packet, len -> %d\n",
   8809 			device_xname(sc->sc_dev), len));
   8810 
   8811 		/* If an error occurred, update stats and drop the packet. */
   8812 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8813 			m_freem(m);
   8814 			continue;
   8815 		}
   8816 
   8817 		/* No errors.  Receive the packet. */
   8818 		m_set_rcvif(m, ifp);
   8819 		m->m_pkthdr.len = len;
   8820 		/*
   8821 		 * TODO
    8822 		 * we should save the rsshash and rsstype in this mbuf.
   8823 		 */
   8824 		DPRINTF(WM_DEBUG_RX,
   8825 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8826 			device_xname(sc->sc_dev), rsstype, rsshash));
   8827 
   8828 		/*
   8829 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8830 		 * for us.  Associate the tag with the packet.
   8831 		 */
   8832 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8833 			continue;
   8834 
   8835 		/* Set up checksum info for this packet. */
   8836 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8837 		/*
    8838 		 * Update the receive pointer while holding rxq_lock,
    8839 		 * consistent with the counter increment.
   8840 		 */
   8841 		rxq->rxq_ptr = i;
   8842 		rxq->rxq_packets++;
   8843 		rxq->rxq_bytes += len;
   8844 		mutex_exit(rxq->rxq_lock);
   8845 
   8846 		/* Pass it on. */
   8847 		if_percpuq_enqueue(sc->sc_ipq, m);
   8848 
   8849 		mutex_enter(rxq->rxq_lock);
   8850 
   8851 		if (rxq->rxq_stopping)
   8852 			break;
   8853 	}
   8854 
   8855 	if (count != 0)
   8856 		rnd_add_uint32(&sc->rnd_source, count);
   8857 
   8858 	DPRINTF(WM_DEBUG_RX,
   8859 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8860 
   8861 	return more;
   8862 }
   8863 
   8864 /*
   8865  * wm_linkintr_gmii:
   8866  *
   8867  *	Helper; handle link interrupts for GMII.
   8868  */
   8869 static void
   8870 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8871 {
   8872 
   8873 	KASSERT(WM_CORE_LOCKED(sc));
   8874 
   8875 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8876 		__func__));
   8877 
   8878 	if (icr & ICR_LSC) {
   8879 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8880 		uint32_t reg;
   8881 		bool link;
   8882 
   8883 		link = status & STATUS_LU;
   8884 		if (link) {
   8885 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8886 				device_xname(sc->sc_dev),
   8887 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8888 		} else {
   8889 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8890 				device_xname(sc->sc_dev)));
   8891 		}
   8892 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8893 			wm_gig_downshift_workaround_ich8lan(sc);
   8894 
   8895 		if ((sc->sc_type == WM_T_ICH8)
   8896 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8897 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8898 		}
   8899 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8900 			device_xname(sc->sc_dev)));
   8901 		mii_pollstat(&sc->sc_mii);
   8902 		if (sc->sc_type == WM_T_82543) {
   8903 			int miistatus, active;
   8904 
   8905 			/*
   8906 			 * With 82543, we need to force speed and
   8907 			 * duplex on the MAC equal to what the PHY
   8908 			 * speed and duplex configuration is.
   8909 			 */
   8910 			miistatus = sc->sc_mii.mii_media_status;
   8911 
   8912 			if (miistatus & IFM_ACTIVE) {
   8913 				active = sc->sc_mii.mii_media_active;
   8914 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8915 				switch (IFM_SUBTYPE(active)) {
   8916 				case IFM_10_T:
   8917 					sc->sc_ctrl |= CTRL_SPEED_10;
   8918 					break;
   8919 				case IFM_100_TX:
   8920 					sc->sc_ctrl |= CTRL_SPEED_100;
   8921 					break;
   8922 				case IFM_1000_T:
   8923 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8924 					break;
   8925 				default:
   8926 					/*
    8927 					 * Fiber?
    8928 					 * Should not enter here.
   8929 					 */
   8930 					printf("unknown media (%x)\n", active);
   8931 					break;
   8932 				}
   8933 				if (active & IFM_FDX)
   8934 					sc->sc_ctrl |= CTRL_FD;
   8935 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8936 			}
   8937 		} else if (sc->sc_type == WM_T_PCH) {
   8938 			wm_k1_gig_workaround_hv(sc,
   8939 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8940 		}
   8941 
   8942 		/*
   8943 		 * I217 Packet Loss issue:
   8944 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8945 		 * on power up.
   8946 		 * Set the Beacon Duration for I217 to 8 usec
   8947 		 */
   8948 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8949 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8950 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8951 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8952 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8953 		}
   8954 
   8955 		/* Work-around I218 hang issue */
   8956 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8957 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8958 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8959 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8960 			wm_k1_workaround_lpt_lp(sc, link);
   8961 
   8962 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8963 			/*
   8964 			 * Set platform power management values for Latency
   8965 			 * Tolerance Reporting (LTR)
   8966 			 */
   8967 			wm_platform_pm_pch_lpt(sc,
   8968 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8969 		}
   8970 
   8971 		/* FEXTNVM6 K1-off workaround */
   8972 		if (sc->sc_type == WM_T_PCH_SPT) {
   8973 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8974 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8975 			    & FEXTNVM6_K1_OFF_ENABLE)
   8976 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8977 			else
   8978 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8979 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8980 		}
   8981 
   8982 		if (!link)
   8983 			return;
   8984 
   8985 		switch (sc->sc_type) {
   8986 		case WM_T_PCH2:
   8987 			wm_k1_workaround_lv(sc);
   8988 			/* FALLTHROUGH */
   8989 		case WM_T_PCH:
   8990 			if (sc->sc_phytype == WMPHY_82578)
   8991 				wm_link_stall_workaround_hv(sc);
   8992 			break;
   8993 		default:
   8994 			break;
   8995 		}
   8996 	} else if (icr & ICR_RXSEQ) {
    8997 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8998 			device_xname(sc->sc_dev)));
   8999 	}
   9000 }
   9001 
   9002 /*
   9003  * wm_linkintr_tbi:
   9004  *
   9005  *	Helper; handle link interrupts for TBI mode.
   9006  */
   9007 static void
   9008 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9009 {
   9010 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9011 	uint32_t status;
   9012 
   9013 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9014 		__func__));
   9015 
   9016 	status = CSR_READ(sc, WMREG_STATUS);
   9017 	if (icr & ICR_LSC) {
   9018 		wm_check_for_link(sc);
   9019 		if (status & STATUS_LU) {
   9020 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9021 				device_xname(sc->sc_dev),
   9022 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9023 			/*
   9024 			 * NOTE: CTRL will update TFCE and RFCE automatically,
    9025 			 * so we should update our cached sc->sc_ctrl copy.
   9026 			 */
   9027 
   9028 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9029 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9030 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9031 			if (status & STATUS_FD)
   9032 				sc->sc_tctl |=
   9033 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9034 			else
   9035 				sc->sc_tctl |=
   9036 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9037 			if (sc->sc_ctrl & CTRL_TFCE)
   9038 				sc->sc_fcrtl |= FCRTL_XONE;
   9039 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9040 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9041 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9042 			sc->sc_tbi_linkup = 1;
   9043 			if_link_state_change(ifp, LINK_STATE_UP);
   9044 		} else {
   9045 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9046 				device_xname(sc->sc_dev)));
   9047 			sc->sc_tbi_linkup = 0;
   9048 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9049 		}
   9050 		/* Update LED */
   9051 		wm_tbi_serdes_set_linkled(sc);
   9052 	} else if (icr & ICR_RXSEQ) {
   9053 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9054 			device_xname(sc->sc_dev)));
   9055 	}
   9056 }
   9057 
   9058 /*
   9059  * wm_linkintr_serdes:
   9060  *
    9061  *	Helper; handle link interrupts for SERDES mode.
   9062  */
   9063 static void
   9064 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9065 {
   9066 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9067 	struct mii_data *mii = &sc->sc_mii;
   9068 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9069 	uint32_t pcs_adv, pcs_lpab, reg;
   9070 
   9071 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9072 		__func__));
   9073 
   9074 	if (icr & ICR_LSC) {
   9075 		/* Check PCS */
   9076 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9077 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9078 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9079 				device_xname(sc->sc_dev)));
   9080 			mii->mii_media_status |= IFM_ACTIVE;
   9081 			sc->sc_tbi_linkup = 1;
   9082 			if_link_state_change(ifp, LINK_STATE_UP);
   9083 		} else {
   9084 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9085 				device_xname(sc->sc_dev)));
   9086 			mii->mii_media_status |= IFM_NONE;
   9087 			sc->sc_tbi_linkup = 0;
   9088 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9089 			wm_tbi_serdes_set_linkled(sc);
   9090 			return;
   9091 		}
   9092 		mii->mii_media_active |= IFM_1000_SX;
   9093 		if ((reg & PCS_LSTS_FDX) != 0)
   9094 			mii->mii_media_active |= IFM_FDX;
   9095 		else
   9096 			mii->mii_media_active |= IFM_HDX;
   9097 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9098 			/* Check flow */
   9099 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9100 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9101 				DPRINTF(WM_DEBUG_LINK,
   9102 				    ("XXX LINKOK but not ACOMP\n"));
   9103 				return;
   9104 			}
   9105 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9106 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9107 			DPRINTF(WM_DEBUG_LINK,
   9108 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9109 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9110 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9111 				mii->mii_media_active |= IFM_FLOW
   9112 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9113 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9114 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9115 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9116 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9117 				mii->mii_media_active |= IFM_FLOW
   9118 				    | IFM_ETH_TXPAUSE;
   9119 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9120 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9121 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9122 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9123 				mii->mii_media_active |= IFM_FLOW
   9124 				    | IFM_ETH_RXPAUSE;
   9125 		}
   9126 		/* Update LED */
   9127 		wm_tbi_serdes_set_linkled(sc);
   9128 	} else {
   9129 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9130 		    device_xname(sc->sc_dev)));
   9131 	}
   9132 }
   9133 
   9134 /*
   9135  * wm_linkintr:
   9136  *
   9137  *	Helper; handle link interrupts.
   9138  */
   9139 static void
   9140 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9141 {
   9142 
   9143 	KASSERT(WM_CORE_LOCKED(sc));
   9144 
   9145 	if (sc->sc_flags & WM_F_HAS_MII)
   9146 		wm_linkintr_gmii(sc, icr);
   9147 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9148 	    && (sc->sc_type >= WM_T_82575))
   9149 		wm_linkintr_serdes(sc, icr);
   9150 	else
   9151 		wm_linkintr_tbi(sc, icr);
   9152 }
   9153 
   9154 /*
   9155  * wm_intr_legacy:
   9156  *
   9157  *	Interrupt service routine for INTx and MSI.
   9158  */
   9159 static int
   9160 wm_intr_legacy(void *arg)
   9161 {
   9162 	struct wm_softc *sc = arg;
   9163 	struct wm_queue *wmq = &sc->sc_queue[0];
   9164 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9165 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9166 	uint32_t icr, rndval = 0;
   9167 	int handled = 0;
   9168 
   9169 	while (1 /* CONSTCOND */) {
   9170 		icr = CSR_READ(sc, WMREG_ICR);
   9171 		if ((icr & sc->sc_icr) == 0)
   9172 			break;
   9173 		if (handled == 0) {
   9174 			DPRINTF(WM_DEBUG_TX,
    9175 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9176 		}
   9177 		if (rndval == 0)
   9178 			rndval = icr;
   9179 
   9180 		mutex_enter(rxq->rxq_lock);
   9181 
   9182 		if (rxq->rxq_stopping) {
   9183 			mutex_exit(rxq->rxq_lock);
   9184 			break;
   9185 		}
   9186 
   9187 		handled = 1;
   9188 
   9189 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9190 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9191 			DPRINTF(WM_DEBUG_RX,
   9192 			    ("%s: RX: got Rx intr 0x%08x\n",
   9193 				device_xname(sc->sc_dev),
   9194 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9195 			WM_Q_EVCNT_INCR(rxq, intr);
   9196 		}
   9197 #endif
    9198 		/*
    9199 		 * wm_rxeof() does *not* call upper layer functions directly,
    9200 		 * as if_percpuq_enqueue() just calls softint_schedule().
    9201 		 * So, we can call wm_rxeof() in interrupt context.
    9202 		 */
   9203 		wm_rxeof(rxq, UINT_MAX);
   9204 
   9205 		mutex_exit(rxq->rxq_lock);
   9206 		mutex_enter(txq->txq_lock);
   9207 
   9208 		if (txq->txq_stopping) {
   9209 			mutex_exit(txq->txq_lock);
   9210 			break;
   9211 		}
   9212 
   9213 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9214 		if (icr & ICR_TXDW) {
   9215 			DPRINTF(WM_DEBUG_TX,
   9216 			    ("%s: TX: got TXDW interrupt\n",
   9217 				device_xname(sc->sc_dev)));
   9218 			WM_Q_EVCNT_INCR(txq, txdw);
   9219 		}
   9220 #endif
   9221 		wm_txeof(txq, UINT_MAX);
   9222 
   9223 		mutex_exit(txq->txq_lock);
   9224 		WM_CORE_LOCK(sc);
   9225 
   9226 		if (sc->sc_core_stopping) {
   9227 			WM_CORE_UNLOCK(sc);
   9228 			break;
   9229 		}
   9230 
   9231 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9232 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9233 			wm_linkintr(sc, icr);
   9234 		}
   9235 
   9236 		WM_CORE_UNLOCK(sc);
   9237 
   9238 		if (icr & ICR_RXO) {
   9239 #if defined(WM_DEBUG)
   9240 			log(LOG_WARNING, "%s: Receive overrun\n",
   9241 			    device_xname(sc->sc_dev));
   9242 #endif /* defined(WM_DEBUG) */
   9243 		}
   9244 	}
   9245 
   9246 	rnd_add_uint32(&sc->rnd_source, rndval);
   9247 
   9248 	if (handled) {
   9249 		/* Try to get more packets going. */
   9250 		softint_schedule(wmq->wmq_si);
   9251 	}
   9252 
   9253 	return handled;
   9254 }
   9255 
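/*
 * wm_txrxintr_disable:
 *
 *	Mask the Tx/Rx interrupts of the given queue; the mask register
 *	and bit layout differ among the 82574, the 82575 and the other
 *	MSI-X capable devices.
 */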
   9256 static inline void
   9257 wm_txrxintr_disable(struct wm_queue *wmq)
   9258 {
   9259 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9260 
   9261 	if (sc->sc_type == WM_T_82574)
   9262 		CSR_WRITE(sc, WMREG_IMC,
   9263 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9264 	else if (sc->sc_type == WM_T_82575)
   9265 		CSR_WRITE(sc, WMREG_EIMC,
   9266 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9267 	else
   9268 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9269 }
   9270 
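/*
 * wm_txrxintr_enable:
 *
 *	Unmask the Tx/Rx interrupts of the given queue, after updating
 *	the interrupt throttling rate.
 */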
   9271 static inline void
   9272 wm_txrxintr_enable(struct wm_queue *wmq)
   9273 {
   9274 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9275 
   9276 	wm_itrs_calculate(sc, wmq);
   9277 
    9278 	/*
    9279 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9280 	 * here. We don't need to care which of RXQ(0) and RXQ(1) enables
    9281 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9282 	 * while its wm_handle_queue(wmq) is running.
    9283 	 */
   9284 	if (sc->sc_type == WM_T_82574)
   9285 		CSR_WRITE(sc, WMREG_IMS,
   9286 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9287 	else if (sc->sc_type == WM_T_82575)
   9288 		CSR_WRITE(sc, WMREG_EIMS,
   9289 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9290 	else
   9291 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9292 }
   9293 
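/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of a queue in
 *	MSI-X mode. Process up to the interrupt limits, then either
 *	schedule the per-queue softint for the remainder or re-enable
 *	the interrupts.
 */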
   9294 static int
   9295 wm_txrxintr_msix(void *arg)
   9296 {
   9297 	struct wm_queue *wmq = arg;
   9298 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9299 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9300 	struct wm_softc *sc = txq->txq_sc;
   9301 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9302 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9303 	bool txmore;
   9304 	bool rxmore;
   9305 
   9306 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9307 
   9308 	DPRINTF(WM_DEBUG_TX,
   9309 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9310 
   9311 	wm_txrxintr_disable(wmq);
   9312 
   9313 	mutex_enter(txq->txq_lock);
   9314 
   9315 	if (txq->txq_stopping) {
   9316 		mutex_exit(txq->txq_lock);
   9317 		return 0;
   9318 	}
   9319 
   9320 	WM_Q_EVCNT_INCR(txq, txdw);
   9321 	txmore = wm_txeof(txq, txlimit);
   9322 	/* wm_deferred start() is done in wm_handle_queue(). */
   9323 	mutex_exit(txq->txq_lock);
   9324 
   9325 	DPRINTF(WM_DEBUG_RX,
   9326 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9327 	mutex_enter(rxq->rxq_lock);
   9328 
   9329 	if (rxq->rxq_stopping) {
   9330 		mutex_exit(rxq->rxq_lock);
   9331 		return 0;
   9332 	}
   9333 
   9334 	WM_Q_EVCNT_INCR(rxq, intr);
   9335 	rxmore = wm_rxeof(rxq, rxlimit);
   9336 	mutex_exit(rxq->rxq_lock);
   9337 
   9338 	wm_itrs_writereg(sc, wmq);
   9339 
   9340 	if (txmore || rxmore)
   9341 		softint_schedule(wmq->wmq_si);
   9342 	else
   9343 		wm_txrxintr_enable(wmq);
   9344 
   9345 	return 1;
   9346 }
   9347 
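/*
 * wm_handle_queue:
 *
 *	Softint handler; continue the Tx/Rx processing that an interrupt
 *	handler deferred, and restart deferred transmission.
 */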
   9348 static void
   9349 wm_handle_queue(void *arg)
   9350 {
   9351 	struct wm_queue *wmq = arg;
   9352 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9353 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9354 	struct wm_softc *sc = txq->txq_sc;
   9355 	u_int txlimit = sc->sc_tx_process_limit;
   9356 	u_int rxlimit = sc->sc_rx_process_limit;
   9357 	bool txmore;
   9358 	bool rxmore;
   9359 
   9360 	mutex_enter(txq->txq_lock);
   9361 	if (txq->txq_stopping) {
   9362 		mutex_exit(txq->txq_lock);
   9363 		return;
   9364 	}
   9365 	txmore = wm_txeof(txq, txlimit);
   9366 	wm_deferred_start_locked(txq);
   9367 	mutex_exit(txq->txq_lock);
   9368 
   9369 	mutex_enter(rxq->rxq_lock);
   9370 	if (rxq->rxq_stopping) {
   9371 		mutex_exit(rxq->rxq_lock);
   9372 		return;
   9373 	}
   9374 	WM_Q_EVCNT_INCR(rxq, defer);
   9375 	rxmore = wm_rxeof(rxq, rxlimit);
   9376 	mutex_exit(rxq->rxq_lock);
   9377 
   9378 	if (txmore || rxmore)
   9379 		softint_schedule(wmq->wmq_si);
   9380 	else
   9381 		wm_txrxintr_enable(wmq);
   9382 }
   9383 
   9384 /*
   9385  * wm_linkintr_msix:
   9386  *
   9387  *	Interrupt service routine for link status change for MSI-X.
   9388  */
   9389 static int
   9390 wm_linkintr_msix(void *arg)
   9391 {
   9392 	struct wm_softc *sc = arg;
   9393 	uint32_t reg;
   9394 	bool has_rxo;
   9395 
   9396 	DPRINTF(WM_DEBUG_LINK,
   9397 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9398 
   9399 	reg = CSR_READ(sc, WMREG_ICR);
   9400 	WM_CORE_LOCK(sc);
   9401 	if (sc->sc_core_stopping)
   9402 		goto out;
   9403 
   9404 	if ((reg & ICR_LSC) != 0) {
   9405 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9406 		wm_linkintr(sc, ICR_LSC);
   9407 	}
   9408 
    9409 	/*
    9410 	 * XXX 82574 MSI-X mode workaround
    9411 	 *
    9412 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9413 	 * MSI-X vector and raises neither the ICR_RXQ(0) nor the ICR_RXQ(1)
    9414 	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by
    9415 	 * writing WMREG_ICS to process the received packets.
    9416 	 */
   9417 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9418 #if defined(WM_DEBUG)
   9419 		log(LOG_WARNING, "%s: Receive overrun\n",
   9420 		    device_xname(sc->sc_dev));
   9421 #endif /* defined(WM_DEBUG) */
   9422 
   9423 		has_rxo = true;
    9424 		/*
    9425 		 * The RXO interrupt fires at a very high rate when receive
    9426 		 * traffic is heavy, so we use polling mode for ICR_OTHER as
    9427 		 * we do for the Tx/Rx interrupts. ICR_OTHER is re-enabled at
    9428 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    9429 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9430 		 */
   9431 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9432 
   9433 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9434 	}
    9435 
   9438 out:
   9439 	WM_CORE_UNLOCK(sc);
   9440 
   9441 	if (sc->sc_type == WM_T_82574) {
   9442 		if (!has_rxo)
   9443 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9444 		else
   9445 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9446 	} else if (sc->sc_type == WM_T_82575)
   9447 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9448 	else
   9449 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9450 
   9451 	return 1;
   9452 }
   9453 
   9454 /*
   9455  * Media related.
   9456  * GMII, SGMII, TBI (and SERDES)
   9457  */
   9458 
   9459 /* Common */
   9460 
   9461 /*
   9462  * wm_tbi_serdes_set_linkled:
   9463  *
   9464  *	Update the link LED on TBI and SERDES devices.
   9465  */
   9466 static void
   9467 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9468 {
   9469 
   9470 	if (sc->sc_tbi_linkup)
   9471 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9472 	else
   9473 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9474 
   9475 	/* 82540 or newer devices are active low */
   9476 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9477 
   9478 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9479 }
   9480 
   9481 /* GMII related */
   9482 
   9483 /*
   9484  * wm_gmii_reset:
   9485  *
   9486  *	Reset the PHY.
   9487  */
   9488 static void
   9489 wm_gmii_reset(struct wm_softc *sc)
   9490 {
   9491 	uint32_t reg;
   9492 	int rv;
   9493 
   9494 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9495 		device_xname(sc->sc_dev), __func__));
   9496 
   9497 	rv = sc->phy.acquire(sc);
   9498 	if (rv != 0) {
   9499 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9500 		    __func__);
   9501 		return;
   9502 	}
   9503 
   9504 	switch (sc->sc_type) {
   9505 	case WM_T_82542_2_0:
   9506 	case WM_T_82542_2_1:
   9507 		/* null */
   9508 		break;
   9509 	case WM_T_82543:
   9510 		/*
   9511 		 * With 82543, we need to force speed and duplex on the MAC
   9512 		 * equal to what the PHY speed and duplex configuration is.
   9513 		 * In addition, we need to perform a hardware reset on the PHY
   9514 		 * to take it out of reset.
   9515 		 */
   9516 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9517 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9518 
   9519 		/* The PHY reset pin is active-low. */
   9520 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9521 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9522 		    CTRL_EXT_SWDPIN(4));
   9523 		reg |= CTRL_EXT_SWDPIO(4);
   9524 
   9525 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9526 		CSR_WRITE_FLUSH(sc);
   9527 		delay(10*1000);
   9528 
   9529 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9530 		CSR_WRITE_FLUSH(sc);
   9531 		delay(150);
   9532 #if 0
   9533 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9534 #endif
   9535 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9536 		break;
   9537 	case WM_T_82544:	/* reset 10000us */
   9538 	case WM_T_82540:
   9539 	case WM_T_82545:
   9540 	case WM_T_82545_3:
   9541 	case WM_T_82546:
   9542 	case WM_T_82546_3:
   9543 	case WM_T_82541:
   9544 	case WM_T_82541_2:
   9545 	case WM_T_82547:
   9546 	case WM_T_82547_2:
   9547 	case WM_T_82571:	/* reset 100us */
   9548 	case WM_T_82572:
   9549 	case WM_T_82573:
   9550 	case WM_T_82574:
   9551 	case WM_T_82575:
   9552 	case WM_T_82576:
   9553 	case WM_T_82580:
   9554 	case WM_T_I350:
   9555 	case WM_T_I354:
   9556 	case WM_T_I210:
   9557 	case WM_T_I211:
   9558 	case WM_T_82583:
   9559 	case WM_T_80003:
   9560 		/* generic reset */
   9561 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9562 		CSR_WRITE_FLUSH(sc);
   9563 		delay(20000);
   9564 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9565 		CSR_WRITE_FLUSH(sc);
   9566 		delay(20000);
   9567 
   9568 		if ((sc->sc_type == WM_T_82541)
   9569 		    || (sc->sc_type == WM_T_82541_2)
   9570 		    || (sc->sc_type == WM_T_82547)
   9571 		    || (sc->sc_type == WM_T_82547_2)) {
    9572 			/* Workarounds for IGP are done in igp_reset() */
   9573 			/* XXX add code to set LED after phy reset */
   9574 		}
   9575 		break;
   9576 	case WM_T_ICH8:
   9577 	case WM_T_ICH9:
   9578 	case WM_T_ICH10:
   9579 	case WM_T_PCH:
   9580 	case WM_T_PCH2:
   9581 	case WM_T_PCH_LPT:
   9582 	case WM_T_PCH_SPT:
   9583 	case WM_T_PCH_CNP:
   9584 		/* generic reset */
   9585 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9586 		CSR_WRITE_FLUSH(sc);
   9587 		delay(100);
   9588 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9589 		CSR_WRITE_FLUSH(sc);
   9590 		delay(150);
   9591 		break;
   9592 	default:
   9593 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9594 		    __func__);
   9595 		break;
   9596 	}
   9597 
   9598 	sc->phy.release(sc);
   9599 
   9600 	/* get_cfg_done */
   9601 	wm_get_cfg_done(sc);
   9602 
   9603 	/* extra setup */
   9604 	switch (sc->sc_type) {
   9605 	case WM_T_82542_2_0:
   9606 	case WM_T_82542_2_1:
   9607 	case WM_T_82543:
   9608 	case WM_T_82544:
   9609 	case WM_T_82540:
   9610 	case WM_T_82545:
   9611 	case WM_T_82545_3:
   9612 	case WM_T_82546:
   9613 	case WM_T_82546_3:
   9614 	case WM_T_82541_2:
   9615 	case WM_T_82547_2:
   9616 	case WM_T_82571:
   9617 	case WM_T_82572:
   9618 	case WM_T_82573:
   9619 	case WM_T_82574:
   9620 	case WM_T_82583:
   9621 	case WM_T_82575:
   9622 	case WM_T_82576:
   9623 	case WM_T_82580:
   9624 	case WM_T_I350:
   9625 	case WM_T_I354:
   9626 	case WM_T_I210:
   9627 	case WM_T_I211:
   9628 	case WM_T_80003:
   9629 		/* null */
   9630 		break;
   9631 	case WM_T_82541:
   9632 	case WM_T_82547:
    9633 		/* XXX Actively configure the LED after PHY reset */
   9634 		break;
   9635 	case WM_T_ICH8:
   9636 	case WM_T_ICH9:
   9637 	case WM_T_ICH10:
   9638 	case WM_T_PCH:
   9639 	case WM_T_PCH2:
   9640 	case WM_T_PCH_LPT:
   9641 	case WM_T_PCH_SPT:
   9642 	case WM_T_PCH_CNP:
   9643 		wm_phy_post_reset(sc);
   9644 		break;
   9645 	default:
   9646 		panic("%s: unknown type\n", __func__);
   9647 		break;
   9648 	}
   9649 }
   9650 
    9651 /*
    9652  * Set up sc_phytype and mii_{read|write}reg.
    9653  *
    9654  *  To identify the PHY type, the correct read/write functions must be
    9655  * selected, and selecting them requires the PCI ID or MAC type, because
    9656  * at that point the PHY registers cannot be accessed yet.
    9657  *
    9658  *  On the first call of this function, the PHY ID is not yet known, so
    9659  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9660  * so the result might be incorrect.
    9661  *
    9662  *  On the second call, the PHY OUI and model are used to identify the
    9663  * PHY type. This may still be imperfect because some entries are missing
    9664  * from the comparison tables, but it is better than the first call.
    9665  *
    9666  *  If the newly detected result differs from the previous assumption,
    9667  * a diagnostic message is printed.
    9668  */
   9669 static void
   9670 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9671     uint16_t phy_model)
   9672 {
   9673 	device_t dev = sc->sc_dev;
   9674 	struct mii_data *mii = &sc->sc_mii;
   9675 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9676 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9677 	mii_readreg_t new_readreg;
   9678 	mii_writereg_t new_writereg;
   9679 
   9680 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9681 		device_xname(sc->sc_dev), __func__));
   9682 
   9683 	if (mii->mii_readreg == NULL) {
   9684 		/*
   9685 		 *  This is the first call of this function. For ICH and PCH
   9686 		 * variants, it's difficult to determine the PHY access method
   9687 		 * by sc_type, so use the PCI product ID for some devices.
   9688 		 */
   9689 
   9690 		switch (sc->sc_pcidevid) {
   9691 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9692 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9693 			/* 82577 */
   9694 			new_phytype = WMPHY_82577;
   9695 			break;
   9696 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9697 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9698 			/* 82578 */
   9699 			new_phytype = WMPHY_82578;
   9700 			break;
   9701 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9702 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9703 			/* 82579 */
   9704 			new_phytype = WMPHY_82579;
   9705 			break;
   9706 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9707 		case PCI_PRODUCT_INTEL_82801I_BM:
   9708 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9709 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9710 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9711 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9712 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9713 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9714 			/* ICH8, 9, 10 with 82567 */
   9715 			new_phytype = WMPHY_BM;
   9716 			break;
   9717 		default:
   9718 			break;
   9719 		}
   9720 	} else {
   9721 		/* It's not the first call. Use PHY OUI and model */
   9722 		switch (phy_oui) {
   9723 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9724 			switch (phy_model) {
   9725 			case 0x0004: /* XXX */
   9726 				new_phytype = WMPHY_82578;
   9727 				break;
   9728 			default:
   9729 				break;
   9730 			}
   9731 			break;
   9732 		case MII_OUI_xxMARVELL:
   9733 			switch (phy_model) {
   9734 			case MII_MODEL_xxMARVELL_I210:
   9735 				new_phytype = WMPHY_I210;
   9736 				break;
   9737 			case MII_MODEL_xxMARVELL_E1011:
   9738 			case MII_MODEL_xxMARVELL_E1000_3:
   9739 			case MII_MODEL_xxMARVELL_E1000_5:
   9740 			case MII_MODEL_xxMARVELL_E1112:
   9741 				new_phytype = WMPHY_M88;
   9742 				break;
   9743 			case MII_MODEL_xxMARVELL_E1149:
   9744 				new_phytype = WMPHY_BM;
   9745 				break;
   9746 			case MII_MODEL_xxMARVELL_E1111:
   9747 			case MII_MODEL_xxMARVELL_I347:
   9748 			case MII_MODEL_xxMARVELL_E1512:
   9749 			case MII_MODEL_xxMARVELL_E1340M:
   9750 			case MII_MODEL_xxMARVELL_E1543:
   9751 				new_phytype = WMPHY_M88;
   9752 				break;
   9753 			case MII_MODEL_xxMARVELL_I82563:
   9754 				new_phytype = WMPHY_GG82563;
   9755 				break;
   9756 			default:
   9757 				break;
   9758 			}
   9759 			break;
   9760 		case MII_OUI_INTEL:
   9761 			switch (phy_model) {
   9762 			case MII_MODEL_INTEL_I82577:
   9763 				new_phytype = WMPHY_82577;
   9764 				break;
   9765 			case MII_MODEL_INTEL_I82579:
   9766 				new_phytype = WMPHY_82579;
   9767 				break;
   9768 			case MII_MODEL_INTEL_I217:
   9769 				new_phytype = WMPHY_I217;
   9770 				break;
   9771 			case MII_MODEL_INTEL_I82580:
   9772 			case MII_MODEL_INTEL_I350:
   9773 				new_phytype = WMPHY_82580;
   9774 				break;
   9775 			default:
   9776 				break;
   9777 			}
   9778 			break;
   9779 		case MII_OUI_yyINTEL:
   9780 			switch (phy_model) {
   9781 			case MII_MODEL_yyINTEL_I82562G:
   9782 			case MII_MODEL_yyINTEL_I82562EM:
   9783 			case MII_MODEL_yyINTEL_I82562ET:
   9784 				new_phytype = WMPHY_IFE;
   9785 				break;
   9786 			case MII_MODEL_yyINTEL_IGP01E1000:
   9787 				new_phytype = WMPHY_IGP;
   9788 				break;
   9789 			case MII_MODEL_yyINTEL_I82566:
   9790 				new_phytype = WMPHY_IGP_3;
   9791 				break;
   9792 			default:
   9793 				break;
   9794 			}
   9795 			break;
   9796 		default:
   9797 			break;
   9798 		}
   9799 		if (new_phytype == WMPHY_UNKNOWN)
   9800 			aprint_verbose_dev(dev,
   9801 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9802 			    __func__, phy_oui, phy_model);
   9803 
   9804 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9805 		    && (sc->sc_phytype != new_phytype)) {
    9806 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9807 			    "was incorrect. PHY type from PHY ID = %u\n",
   9808 			    sc->sc_phytype, new_phytype);
   9809 		}
   9810 	}
   9811 
   9812 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9813 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9814 		/* SGMII */
   9815 		new_readreg = wm_sgmii_readreg;
   9816 		new_writereg = wm_sgmii_writereg;
   9817 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9818 		/* BM2 (phyaddr == 1) */
   9819 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9820 		    && (new_phytype != WMPHY_BM)
   9821 		    && (new_phytype != WMPHY_UNKNOWN))
   9822 			doubt_phytype = new_phytype;
   9823 		new_phytype = WMPHY_BM;
   9824 		new_readreg = wm_gmii_bm_readreg;
   9825 		new_writereg = wm_gmii_bm_writereg;
   9826 	} else if (sc->sc_type >= WM_T_PCH) {
   9827 		/* All PCH* use _hv_ */
   9828 		new_readreg = wm_gmii_hv_readreg;
   9829 		new_writereg = wm_gmii_hv_writereg;
   9830 	} else if (sc->sc_type >= WM_T_ICH8) {
   9831 		/* non-82567 ICH8, 9 and 10 */
   9832 		new_readreg = wm_gmii_i82544_readreg;
   9833 		new_writereg = wm_gmii_i82544_writereg;
   9834 	} else if (sc->sc_type >= WM_T_80003) {
   9835 		/* 80003 */
   9836 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9837 		    && (new_phytype != WMPHY_GG82563)
   9838 		    && (new_phytype != WMPHY_UNKNOWN))
   9839 			doubt_phytype = new_phytype;
   9840 		new_phytype = WMPHY_GG82563;
   9841 		new_readreg = wm_gmii_i80003_readreg;
   9842 		new_writereg = wm_gmii_i80003_writereg;
   9843 	} else if (sc->sc_type >= WM_T_I210) {
   9844 		/* I210 and I211 */
   9845 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9846 		    && (new_phytype != WMPHY_I210)
   9847 		    && (new_phytype != WMPHY_UNKNOWN))
   9848 			doubt_phytype = new_phytype;
   9849 		new_phytype = WMPHY_I210;
   9850 		new_readreg = wm_gmii_gs40g_readreg;
   9851 		new_writereg = wm_gmii_gs40g_writereg;
   9852 	} else if (sc->sc_type >= WM_T_82580) {
   9853 		/* 82580, I350 and I354 */
   9854 		new_readreg = wm_gmii_82580_readreg;
   9855 		new_writereg = wm_gmii_82580_writereg;
   9856 	} else if (sc->sc_type >= WM_T_82544) {
    9857 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9858 		new_readreg = wm_gmii_i82544_readreg;
   9859 		new_writereg = wm_gmii_i82544_writereg;
   9860 	} else {
   9861 		new_readreg = wm_gmii_i82543_readreg;
   9862 		new_writereg = wm_gmii_i82543_writereg;
   9863 	}
   9864 
   9865 	if (new_phytype == WMPHY_BM) {
   9866 		/* All BM use _bm_ */
   9867 		new_readreg = wm_gmii_bm_readreg;
   9868 		new_writereg = wm_gmii_bm_writereg;
   9869 	}
   9870 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9871 		/* All PCH* use _hv_ */
   9872 		new_readreg = wm_gmii_hv_readreg;
   9873 		new_writereg = wm_gmii_hv_writereg;
   9874 	}
   9875 
   9876 	/* Diag output */
   9877 	if (doubt_phytype != WMPHY_UNKNOWN)
   9878 		aprint_error_dev(dev, "Assumed new PHY type was "
   9879 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9880 		    new_phytype);
   9881 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9882 	    && (sc->sc_phytype != new_phytype))
    9883 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9884 		    "was incorrect. New PHY type = %u\n",
   9885 		    sc->sc_phytype, new_phytype);
   9886 
   9887 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9888 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9889 
   9890 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9891 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9892 		    "function was incorrect.\n");
   9893 
   9894 	/* Update now */
   9895 	sc->sc_phytype = new_phytype;
   9896 	mii->mii_readreg = new_readreg;
   9897 	mii->mii_writereg = new_writereg;
   9898 	if (new_readreg == wm_gmii_hv_readreg) {
   9899 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9900 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9901 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9902 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9903 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9904 	}
   9905 }
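
/*
 * Illustrative call sequence (a sketch; the exact first-call site and its
 * arguments are an assumption, not shown in this section):
 */
#if 0
static void
example_phytype_two_phase(struct wm_softc *sc, struct mii_softc *child)
{

	/* 1st call: OUI/model unknown; guess from the PCI ID or MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes the bus and finds "child" ... */

	/* 2nd call: refine the guess using the PHY's own ID registers. */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
}
#endif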
   9906 
   9907 /*
   9908  * wm_get_phy_id_82575:
   9909  *
    9910  * Return the PHY ID, or -1 on failure.
   9911  */
   9912 static int
   9913 wm_get_phy_id_82575(struct wm_softc *sc)
   9914 {
   9915 	uint32_t reg;
   9916 	int phyid = -1;
   9917 
   9918 	/* XXX */
   9919 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9920 		return -1;
   9921 
   9922 	if (wm_sgmii_uses_mdio(sc)) {
   9923 		switch (sc->sc_type) {
   9924 		case WM_T_82575:
   9925 		case WM_T_82576:
   9926 			reg = CSR_READ(sc, WMREG_MDIC);
   9927 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9928 			break;
   9929 		case WM_T_82580:
   9930 		case WM_T_I350:
   9931 		case WM_T_I354:
   9932 		case WM_T_I210:
   9933 		case WM_T_I211:
   9934 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9935 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9936 			break;
   9937 		default:
   9938 			return -1;
   9939 		}
   9940 	}
   9941 
   9942 	return phyid;
   9943 }
   9944 
   9945 
   9946 /*
   9947  * wm_gmii_mediainit:
   9948  *
   9949  *	Initialize media for use on 1000BASE-T devices.
   9950  */
   9951 static void
   9952 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9953 {
   9954 	device_t dev = sc->sc_dev;
   9955 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9956 	struct mii_data *mii = &sc->sc_mii;
   9957 	uint32_t reg;
   9958 
   9959 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9960 		device_xname(sc->sc_dev), __func__));
   9961 
   9962 	/* We have GMII. */
   9963 	sc->sc_flags |= WM_F_HAS_MII;
   9964 
   9965 	if (sc->sc_type == WM_T_80003)
   9966 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9967 	else
   9968 		sc->sc_tipg = TIPG_1000T_DFLT;
   9969 
   9970 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9971 	if ((sc->sc_type == WM_T_82580)
   9972 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9973 	    || (sc->sc_type == WM_T_I211)) {
   9974 		reg = CSR_READ(sc, WMREG_PHPM);
   9975 		reg &= ~PHPM_GO_LINK_D;
   9976 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9977 	}
   9978 
   9979 	/*
   9980 	 * Let the chip set speed/duplex on its own based on
   9981 	 * signals from the PHY.
   9982 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9983 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9984 	 */
   9985 	sc->sc_ctrl |= CTRL_SLU;
   9986 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9987 
   9988 	/* Initialize our media structures and probe the GMII. */
   9989 	mii->mii_ifp = ifp;
   9990 
   9991 	mii->mii_statchg = wm_gmii_statchg;
   9992 
   9993 	/* get PHY control from SMBus to PCIe */
   9994 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9995 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9996 	    || (sc->sc_type == WM_T_PCH_CNP))
   9997 		wm_init_phy_workarounds_pchlan(sc);
   9998 
   9999 	wm_gmii_reset(sc);
   10000 
   10001 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10002 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10003 	    wm_gmii_mediastatus);
   10004 
   10005 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10006 	    || (sc->sc_type == WM_T_82580)
   10007 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10008 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10009 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10010 			/* Attach only one port */
   10011 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10012 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10013 		} else {
   10014 			int i, id;
   10015 			uint32_t ctrl_ext;
   10016 
   10017 			id = wm_get_phy_id_82575(sc);
   10018 			if (id != -1) {
   10019 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10020 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10021 			}
   10022 			if ((id == -1)
   10023 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10024 				/* Power on sgmii phy if it is disabled */
   10025 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10026 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10027 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10028 				CSR_WRITE_FLUSH(sc);
   10029 				delay(300*1000); /* XXX too long */
   10030 
   10031 				/* from 1 to 8 */
   10032 				for (i = 1; i < 8; i++)
   10033 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10034 					    0xffffffff, i, MII_OFFSET_ANY,
   10035 					    MIIF_DOPAUSE);
   10036 
   10037 				/* restore previous sfp cage power state */
   10038 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10039 			}
   10040 		}
   10041 	} else
   10042 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10043 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10044 
   10045 	/*
    10046 	 * If the MAC is PCH2 or newer and failed to detect the MII PHY, call
    10047 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10048 	 */
   10049 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10050 		|| (sc->sc_type == WM_T_PCH_SPT)
   10051 		|| (sc->sc_type == WM_T_PCH_CNP))
   10052 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10053 		wm_set_mdio_slow_mode_hv(sc);
   10054 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10055 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10056 	}
   10057 
   10058 	/*
   10059 	 * (For ICH8 variants)
    10060 	 * If PHY detection failed, use the BM read/write functions and retry.
   10061 	 */
   10062 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10063 		/* if failed, retry with *_bm_* */
   10064 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10065 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10066 		    sc->sc_phytype);
   10067 		sc->sc_phytype = WMPHY_BM;
   10068 		mii->mii_readreg = wm_gmii_bm_readreg;
   10069 		mii->mii_writereg = wm_gmii_bm_writereg;
   10070 
   10071 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10072 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10073 	}
   10074 
   10075 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10076 		/* No PHY was found */
   10077 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10078 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10079 		sc->sc_phytype = WMPHY_NONE;
   10080 	} else {
   10081 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10082 
   10083 		/*
    10084 		 * A PHY was found. Check the PHY type again with the second
    10085 		 * call of wm_gmii_setup_phytype().
   10086 		 */
   10087 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10088 		    child->mii_mpd_model);
   10089 
   10090 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10091 	}
   10092 }
   10093 
   10094 /*
   10095  * wm_gmii_mediachange:	[ifmedia interface function]
   10096  *
   10097  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10098  */
   10099 static int
   10100 wm_gmii_mediachange(struct ifnet *ifp)
   10101 {
   10102 	struct wm_softc *sc = ifp->if_softc;
   10103 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10104 	int rc;
   10105 
   10106 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10107 		device_xname(sc->sc_dev), __func__));
   10108 	if ((ifp->if_flags & IFF_UP) == 0)
   10109 		return 0;
   10110 
   10111 	/* Disable D0 LPLU. */
   10112 	wm_lplu_d0_disable(sc);
   10113 
   10114 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10115 	sc->sc_ctrl |= CTRL_SLU;
   10116 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10117 	    || (sc->sc_type > WM_T_82543)) {
   10118 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10119 	} else {
   10120 		sc->sc_ctrl &= ~CTRL_ASDE;
   10121 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10122 		if (ife->ifm_media & IFM_FDX)
   10123 			sc->sc_ctrl |= CTRL_FD;
   10124 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10125 		case IFM_10_T:
   10126 			sc->sc_ctrl |= CTRL_SPEED_10;
   10127 			break;
   10128 		case IFM_100_TX:
   10129 			sc->sc_ctrl |= CTRL_SPEED_100;
   10130 			break;
   10131 		case IFM_1000_T:
   10132 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10133 			break;
   10134 		default:
   10135 			panic("wm_gmii_mediachange: bad media 0x%x",
   10136 			    ife->ifm_media);
   10137 		}
   10138 	}
   10139 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10140 	CSR_WRITE_FLUSH(sc);
   10141 	if (sc->sc_type <= WM_T_82543)
   10142 		wm_gmii_reset(sc);
   10143 
   10144 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10145 		return 0;
   10146 	return rc;
   10147 }
   10148 
   10149 /*
   10150  * wm_gmii_mediastatus:	[ifmedia interface function]
   10151  *
   10152  *	Get the current interface media status on a 1000BASE-T device.
   10153  */
   10154 static void
   10155 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10156 {
   10157 	struct wm_softc *sc = ifp->if_softc;
   10158 
   10159 	ether_mediastatus(ifp, ifmr);
   10160 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10161 	    | sc->sc_flowflags;
   10162 }
   10163 
   10164 #define	MDI_IO		CTRL_SWDPIN(2)
   10165 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10166 #define	MDI_CLK		CTRL_SWDPIN(3)
   10167 
   10168 static void
   10169 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10170 {
   10171 	uint32_t i, v;
   10172 
   10173 	v = CSR_READ(sc, WMREG_CTRL);
   10174 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10175 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10176 
   10177 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10178 		if (data & i)
   10179 			v |= MDI_IO;
   10180 		else
   10181 			v &= ~MDI_IO;
   10182 		CSR_WRITE(sc, WMREG_CTRL, v);
   10183 		CSR_WRITE_FLUSH(sc);
   10184 		delay(10);
   10185 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10186 		CSR_WRITE_FLUSH(sc);
   10187 		delay(10);
   10188 		CSR_WRITE(sc, WMREG_CTRL, v);
   10189 		CSR_WRITE_FLUSH(sc);
   10190 		delay(10);
   10191 	}
   10192 }
   10193 
   10194 static uint32_t
   10195 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10196 {
   10197 	uint32_t v, i, data = 0;
   10198 
   10199 	v = CSR_READ(sc, WMREG_CTRL);
   10200 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10201 	v |= CTRL_SWDPIO(3);
   10202 
   10203 	CSR_WRITE(sc, WMREG_CTRL, v);
   10204 	CSR_WRITE_FLUSH(sc);
   10205 	delay(10);
   10206 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10207 	CSR_WRITE_FLUSH(sc);
   10208 	delay(10);
   10209 	CSR_WRITE(sc, WMREG_CTRL, v);
   10210 	CSR_WRITE_FLUSH(sc);
   10211 	delay(10);
   10212 
   10213 	for (i = 0; i < 16; i++) {
   10214 		data <<= 1;
   10215 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10216 		CSR_WRITE_FLUSH(sc);
   10217 		delay(10);
   10218 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10219 			data |= 1;
   10220 		CSR_WRITE(sc, WMREG_CTRL, v);
   10221 		CSR_WRITE_FLUSH(sc);
   10222 		delay(10);
   10223 	}
   10224 
   10225 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10226 	CSR_WRITE_FLUSH(sc);
   10227 	delay(10);
   10228 	CSR_WRITE(sc, WMREG_CTRL, v);
   10229 	CSR_WRITE_FLUSH(sc);
   10230 	delay(10);
   10231 
   10232 	return data;
   10233 }
   10234 
   10235 #undef MDI_IO
   10236 #undef MDI_DIR
   10237 #undef MDI_CLK
   10238 
   10239 /*
   10240  * wm_gmii_i82543_readreg:	[mii interface function]
   10241  *
   10242  *	Read a PHY register on the GMII (i82543 version).
   10243  */
   10244 static int
   10245 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10246 {
   10247 	struct wm_softc *sc = device_private(dev);
   10248 	int rv;
   10249 
   10250 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10251 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10252 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10253 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10254 
   10255 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10256 		device_xname(dev), phy, reg, rv));
   10257 
   10258 	return rv;
   10259 }
   10260 
   10261 /*
   10262  * wm_gmii_i82543_writereg:	[mii interface function]
   10263  *
   10264  *	Write a PHY register on the GMII (i82543 version).
   10265  */
   10266 static void
   10267 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10268 {
   10269 	struct wm_softc *sc = device_private(dev);
   10270 
   10271 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10272 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10273 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10274 	    (MII_COMMAND_START << 30), 32);
   10275 }
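
/*
 * Illustrative sketch (not part of the driver): field layout of the
 * bit-banged MII frames above, clocked out MSB first. For a read, the
 * 14-bit header is:
 *
 *	[13:12] start  [11:10] opcode  [9:5] PHY address  [4:0] register
 *
 * and the 16 data bits are then clocked in by wm_i82543_mii_recvbits().
 */
#if 0
static uint32_t
example_mii_read_header(int phy, int reg)
{

	return reg | (phy << 5) | (MII_COMMAND_READ << 10) |
	    (MII_COMMAND_START << 12);
}
#endif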
   10276 
   10277 /*
   10278  * wm_gmii_mdic_readreg:	[mii interface function]
   10279  *
   10280  *	Read a PHY register on the GMII.
   10281  */
   10282 static int
   10283 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10284 {
   10285 	struct wm_softc *sc = device_private(dev);
   10286 	uint32_t mdic = 0;
   10287 	int i, rv;
   10288 
   10289 	if (reg > MII_ADDRMASK) {
   10290 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10291 		    __func__, sc->sc_phytype, reg);
   10292 		reg &= MII_ADDRMASK;
   10293 	}
   10294 
   10295 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10296 	    MDIC_REGADD(reg));
   10297 
   10298 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10299 		delay(50);
   10300 		mdic = CSR_READ(sc, WMREG_MDIC);
   10301 		if (mdic & MDIC_READY)
   10302 			break;
   10303 	}
   10304 
   10305 	if ((mdic & MDIC_READY) == 0) {
   10306 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10307 		    device_xname(dev), phy, reg);
   10308 		return 0;
   10309 	} else if (mdic & MDIC_E) {
   10310 #if 0 /* This is normal if no PHY is present. */
   10311 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10312 		    device_xname(dev), phy, reg);
   10313 #endif
   10314 		return 0;
   10315 	} else {
   10316 		rv = MDIC_DATA(mdic);
   10317 		if (rv == 0xffff)
   10318 			rv = 0;
   10319 	}
   10320 
   10321 	/*
   10322 	 * Allow some time after each MDIC transaction to avoid
   10323 	 * reading duplicate data in the next MDIC transaction.
   10324 	 */
   10325 	if (sc->sc_type == WM_T_PCH2)
   10326 		delay(100);
   10327 
   10328 	return rv;
   10329 }
   10330 
   10331 /*
   10332  * wm_gmii_mdic_writereg:	[mii interface function]
   10333  *
   10334  *	Write a PHY register on the GMII.
   10335  */
   10336 static void
   10337 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10338 {
   10339 	struct wm_softc *sc = device_private(dev);
   10340 	uint32_t mdic = 0;
   10341 	int i;
   10342 
   10343 	if (reg > MII_ADDRMASK) {
   10344 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10345 		    __func__, sc->sc_phytype, reg);
   10346 		reg &= MII_ADDRMASK;
   10347 	}
   10348 
   10349 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10350 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10351 
   10352 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10353 		delay(50);
   10354 		mdic = CSR_READ(sc, WMREG_MDIC);
   10355 		if (mdic & MDIC_READY)
   10356 			break;
   10357 	}
   10358 
   10359 	if ((mdic & MDIC_READY) == 0) {
   10360 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10361 		    device_xname(dev), phy, reg);
   10362 		return;
   10363 	} else if (mdic & MDIC_E) {
   10364 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10365 		    device_xname(dev), phy, reg);
   10366 		return;
   10367 	}
   10368 
   10369 	/*
   10370 	 * Allow some time after each MDIC transaction to avoid
   10371 	 * reading duplicate data in the next MDIC transaction.
   10372 	 */
   10373 	if (sc->sc_type == WM_T_PCH2)
   10374 		delay(100);
   10375 }
   10376 
   10377 /*
   10378  * wm_gmii_i82544_readreg:	[mii interface function]
   10379  *
   10380  *	Read a PHY register on the GMII.
   10381  */
   10382 static int
   10383 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10384 {
   10385 	struct wm_softc *sc = device_private(dev);
   10386 	uint16_t val;
   10387 
   10388 	if (sc->phy.acquire(sc)) {
   10389 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10390 		return 0;
   10391 	}
   10392 
   10393 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10394 
   10395 	sc->phy.release(sc);
   10396 
   10397 	return val;
   10398 }
   10399 
   10400 static int
   10401 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10402 {
   10403 	struct wm_softc *sc = device_private(dev);
   10404 
   10405 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10406 		switch (sc->sc_phytype) {
   10407 		case WMPHY_IGP:
   10408 		case WMPHY_IGP_2:
   10409 		case WMPHY_IGP_3:
   10410 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10411 			    reg);
   10412 			break;
   10413 		default:
   10414 #ifdef WM_DEBUG
   10415 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10416 			    __func__, sc->sc_phytype, reg);
   10417 #endif
   10418 			break;
   10419 		}
   10420 	}
   10421 
   10422 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10423 
   10424 	return 0;
   10425 }
   10426 
   10427 /*
   10428  * wm_gmii_i82544_writereg:	[mii interface function]
   10429  *
   10430  *	Write a PHY register on the GMII.
   10431  */
   10432 static void
   10433 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10434 {
   10435 	struct wm_softc *sc = device_private(dev);
   10436 
   10437 	if (sc->phy.acquire(sc)) {
   10438 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10439 		return;
   10440 	}
   10441 
   10442 	wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10443 	sc->phy.release(sc);
   10444 }
   10445 
   10446 static int
   10447 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10448 {
   10449 	struct wm_softc *sc = device_private(dev);
   10450 
   10451 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10452 		switch (sc->sc_phytype) {
   10453 		case WMPHY_IGP:
   10454 		case WMPHY_IGP_2:
   10455 		case WMPHY_IGP_3:
   10456 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10457 			    reg);
   10458 			break;
   10459 		default:
   10460 #ifdef WM_DEBUG
   10461 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10462 			    __func__, sc->sc_phytype, reg);
   10463 #endif
   10464 			break;
   10465 		}
   10466 	}
   10467 
   10468 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10469 
   10470 	return 0;
   10471 }
   10472 
   10473 /*
   10474  * wm_gmii_i80003_readreg:	[mii interface function]
   10475  *
   10476  *	Read a PHY register on the kumeran
   10477  * This could be handled by the PHY layer if we didn't have to lock the
    10478  * resource ...
   10479  */
   10480 static int
   10481 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10482 {
   10483 	struct wm_softc *sc = device_private(dev);
   10484 	int page_select, temp;
   10485 	int rv;
   10486 
   10487 	if (phy != 1) /* only one PHY on kumeran bus */
   10488 		return 0;
   10489 
   10490 	if (sc->phy.acquire(sc)) {
   10491 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10492 		return 0;
   10493 	}
   10494 
   10495 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10496 		page_select = GG82563_PHY_PAGE_SELECT;
   10497 	else {
   10498 		/*
   10499 		 * Use Alternative Page Select register to access registers
   10500 		 * 30 and 31.
   10501 		 */
   10502 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10503 	}
   10504 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10505 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10506 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10507 		/*
    10508 		 * Wait an additional 200us to work around a bug in the
    10509 		 * ready bit of the MDIC register.
   10510 		 */
   10511 		delay(200);
   10512 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10513 			device_printf(dev, "%s failed\n", __func__);
   10514 			rv = 0; /* XXX */
   10515 			goto out;
   10516 		}
   10517 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10518 		delay(200);
   10519 	} else
   10520 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10521 
   10522 out:
   10523 	sc->phy.release(sc);
   10524 	return rv;
   10525 }
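
/*
 * Illustrative sketch (not part of the driver): the WM_F_80003_MDIC_WA
 * path above is a write / settle / read-back-verify pattern on the page
 * select register; the hypothetical helper below isolates it.
 */
#if 0
static bool
example_verified_page_select(device_t dev, int phy, int page_select,
    int page)
{

	wm_gmii_mdic_writereg(dev, phy, page_select, page);
	delay(200);	/* settle time for the buggy MDIC ready bit */
	return wm_gmii_mdic_readreg(dev, phy, page_select) == page;
}
#endif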
   10526 
   10527 /*
   10528  * wm_gmii_i80003_writereg:	[mii interface function]
   10529  *
   10530  *	Write a PHY register on the kumeran.
   10531  * This could be handled by the PHY layer if we didn't have to lock the
    10532  * resource ...
   10533  */
   10534 static void
   10535 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10536 {
   10537 	struct wm_softc *sc = device_private(dev);
   10538 	int page_select, temp;
   10539 
   10540 	if (phy != 1) /* only one PHY on kumeran bus */
   10541 		return;
   10542 
   10543 	if (sc->phy.acquire(sc)) {
   10544 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10545 		return;
   10546 	}
   10547 
   10548 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10549 		page_select = GG82563_PHY_PAGE_SELECT;
   10550 	else {
   10551 		/*
   10552 		 * Use Alternative Page Select register to access registers
   10553 		 * 30 and 31.
   10554 		 */
   10555 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10556 	}
   10557 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10558 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10559 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10560 		/*
    10561 		 * Wait an additional 200us to work around a bug in the
    10562 		 * ready bit of the MDIC register.
   10563 		 */
   10564 		delay(200);
   10565 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10566 			device_printf(dev, "%s failed\n", __func__);
   10567 			goto out;
   10568 		}
   10569 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10570 		delay(200);
   10571 	} else
   10572 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10573 
   10574 out:
   10575 	sc->phy.release(sc);
   10576 }
   10577 
   10578 /*
   10579  * wm_gmii_bm_readreg:	[mii interface function]
   10580  *
    10581  *	Read a PHY register on the BM (ICH8 and newer).
    10582  * This could be handled by the PHY layer if we didn't have to lock the
    10583  * resource ...
   10584  */
   10585 static int
   10586 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10587 {
   10588 	struct wm_softc *sc = device_private(dev);
   10589 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10590 	uint16_t val;
   10591 	int rv;
   10592 
   10593 	if (sc->phy.acquire(sc)) {
   10594 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10595 		return 0;
   10596 	}
   10597 
   10598 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10599 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10600 		    || (reg == 31)) ? 1 : phy;
   10601 	/* Page 800 works differently than the rest so it has its own func */
   10602 	if (page == BM_WUC_PAGE) {
   10603 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10604 		rv = val;
   10605 		goto release;
   10606 	}
   10607 
   10608 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10609 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10610 		    && (sc->sc_type != WM_T_82583))
   10611 			wm_gmii_mdic_writereg(dev, phy,
   10612 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10613 		else
   10614 			wm_gmii_mdic_writereg(dev, phy,
   10615 			    BME1000_PHY_PAGE_SELECT, page);
   10616 	}
   10617 
   10618 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10619 
   10620 release:
   10621 	sc->phy.release(sc);
   10622 	return rv;
   10623 }
   10624 
   10625 /*
   10626  * wm_gmii_bm_writereg:	[mii interface function]
   10627  *
    10628  *	Write a PHY register on the BM (ICH8 and newer).
    10629  * This could be handled by the PHY layer if we didn't have to lock the
    10630  * resource ...
   10631  */
   10632 static void
   10633 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10634 {
   10635 	struct wm_softc *sc = device_private(dev);
   10636 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10637 
   10638 	if (sc->phy.acquire(sc)) {
   10639 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10640 		return;
   10641 	}
   10642 
   10643 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10644 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10645 		    || (reg == 31)) ? 1 : phy;
   10646 	/* Page 800 works differently than the rest so it has its own func */
   10647 	if (page == BM_WUC_PAGE) {
   10648 		uint16_t tmp;
   10649 
   10650 		tmp = val;
   10651 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10652 		goto release;
   10653 	}
   10654 
   10655 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10656 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10657 		    && (sc->sc_type != WM_T_82583))
   10658 			wm_gmii_mdic_writereg(dev, phy,
   10659 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10660 		else
   10661 			wm_gmii_mdic_writereg(dev, phy,
   10662 			    BME1000_PHY_PAGE_SELECT, page);
   10663 	}
   10664 
   10665 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10666 
   10667 release:
   10668 	sc->phy.release(sc);
   10669 }
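
/*
 * Illustrative sketch (not part of the driver): BM "reg" arguments pack a
 * page number above BME1000_PAGE_SHIFT with the 5-bit register offset
 * below it, so one flat value names (page, offset):
 */
#if 0
static void
example_bm_decode(int reg, uint16_t *page, uint16_t *offset)
{

	*page = reg >> BME1000_PAGE_SHIFT;
	*offset = reg & MII_ADDRMASK;
}
#endif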
   10670 
   10671 static void
   10672 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10673 {
   10674 	struct wm_softc *sc = device_private(dev);
   10675 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10676 	uint16_t wuce, reg;
   10677 
   10678 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10679 		device_xname(dev), __func__));
   10680 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10681 	if (sc->sc_type == WM_T_PCH) {
    10682 		/* XXX The e1000 driver does nothing here... why? */
   10683 	}
   10684 
   10685 	/*
   10686 	 * 1) Enable PHY wakeup register first.
   10687 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10688 	 */
   10689 
   10690 	/* Set page 769 */
   10691 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10692 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10693 
   10694 	/* Read WUCE and save it */
   10695 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10696 
   10697 	reg = wuce | BM_WUC_ENABLE_BIT;
   10698 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10699 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10700 
   10701 	/* Select page 800 */
   10702 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10703 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10704 
   10705 	/*
   10706 	 * 2) Access PHY wakeup register.
   10707 	 * See e1000_access_phy_wakeup_reg_bm.
   10708 	 */
   10709 
   10710 	/* Write page 800 */
   10711 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10712 
   10713 	if (rd)
   10714 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10715 	else
   10716 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10717 
   10718 	/*
   10719 	 * 3) Disable PHY wakeup register.
   10720 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10721 	 */
   10722 	/* Set page 769 */
   10723 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10724 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10725 
   10726 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10727 }
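
/*
 * Illustrative usage sketch (not part of the driver): reading one
 * wakeup-page register through the helper above. The caller must already
 * hold the PHY semaphore, as all callers in this file do.
 */
#if 0
static uint16_t
example_read_wuc_reg(device_t dev, int offset)
{
	uint16_t val;

	wm_access_phy_wakeup_reg_bm(dev, offset, &val, 1);	/* rd = 1 */
	return val;
}
#endif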
   10728 
   10729 /*
   10730  * wm_gmii_hv_readreg:	[mii interface function]
   10731  *
    10732  *	Read a PHY register on the HV (PCH and newer).
    10733  * This could be handled by the PHY layer if we didn't have to lock the
    10734  * resource ...
   10735  */
   10736 static int
   10737 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10738 {
   10739 	struct wm_softc *sc = device_private(dev);
   10740 	uint16_t val;
   10741 
   10742 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10743 		device_xname(dev), __func__));
   10744 	if (sc->phy.acquire(sc)) {
   10745 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10746 		return 0;
   10747 	}
   10748 
   10749 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10750 	sc->phy.release(sc);
   10751 	return val;
   10752 }
   10753 
   10754 static int
   10755 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10756 {
   10757 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10758 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10759 
   10760 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10761 
   10762 	/* Page 800 works differently than the rest so it has its own func */
   10763 	if (page == BM_WUC_PAGE) {
   10764 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10765 		return 0;
   10766 	}
   10767 
   10768 	/*
    10769 	 * Pages lower than 768 work differently from the rest, so they
    10770 	 * need their own function (not handled here).
   10771 	 */
   10772 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10773 		printf("gmii_hv_readreg!!!\n");
   10774 		return 0;
   10775 	}
   10776 
   10777 	/*
   10778 	 * XXX I21[789] documents say that the SMBus Address register is at
   10779 	 * PHY address 01, Page 0 (not 768), Register 26.
   10780 	 */
   10781 	if (page == HV_INTC_FC_PAGE_START)
   10782 		page = 0;
   10783 
   10784 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10785 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10786 		    page << BME1000_PAGE_SHIFT);
   10787 	}
   10788 
   10789 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10790 	return 0;
   10791 }
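
/*
 * Illustrative sketch (not part of the driver): HV register arguments
 * decode the same way as BM ones, and pages at or above
 * HV_INTC_FC_PAGE_START are redirected to PHY address 1:
 */
#if 0
static int
example_hv_phy_addr(int phy, int reg)
{

	return (BM_PHY_REG_PAGE(reg) >= HV_INTC_FC_PAGE_START) ? 1 : phy;
}
#endif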
   10792 
   10793 /*
   10794  * wm_gmii_hv_writereg:	[mii interface function]
   10795  *
    10796  *	Write a PHY register on the HV (PCH and newer).
    10797  * This could be handled by the PHY layer if we didn't have to lock the
    10798  * resource ...
   10799  */
   10800 static void
   10801 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10802 {
   10803 	struct wm_softc *sc = device_private(dev);
   10804 
   10805 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10806 		device_xname(dev), __func__));
   10807 
   10808 	if (sc->phy.acquire(sc)) {
   10809 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10810 		return;
   10811 	}
   10812 
   10813 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10814 	sc->phy.release(sc);
   10815 }
   10816 
   10817 static int
   10818 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10819 {
   10820 	struct wm_softc *sc = device_private(dev);
   10821 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10822 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10823 
   10824 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10825 
   10826 	/* Page 800 works differently than the rest so it has its own func */
   10827 	if (page == BM_WUC_PAGE) {
   10828 		uint16_t tmp;
   10829 
   10830 		tmp = val;
   10831 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10832 		return 0;
   10833 	}
   10834 
   10835 	/*
    10836 	 * Pages lower than 768 work differently from the rest, so they
    10837 	 * need their own function (not handled here).
   10838 	 */
   10839 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10840 		printf("gmii_hv_writereg!!!\n");
   10841 		return -1;
   10842 	}
   10843 
   10844 	{
   10845 		/*
   10846 		 * XXX I21[789] documents say that the SMBus Address register
   10847 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10848 		 */
   10849 		if (page == HV_INTC_FC_PAGE_START)
   10850 			page = 0;
   10851 
   10852 		/*
   10853 		 * XXX Workaround MDIO accesses being disabled after entering
   10854 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10855 		 * register is set)
   10856 		 */
   10857 		if (sc->sc_phytype == WMPHY_82578) {
   10858 			struct mii_softc *child;
   10859 
   10860 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10861 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10862 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10863 			    && ((val & (1 << 11)) != 0)) {
   10864 				printf("XXX need workaround\n");
   10865 			}
   10866 		}
   10867 
   10868 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10869 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10870 			    page << BME1000_PAGE_SHIFT);
   10871 		}
   10872 	}
   10873 
   10874 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10875 
   10876 	return 0;
   10877 }
   10878 
   10879 /*
   10880  * wm_gmii_82580_readreg:	[mii interface function]
   10881  *
   10882  *	Read a PHY register on the 82580 and I350.
   10883  * This could be handled by the PHY layer if we didn't have to lock the
    10884  * resource ...
   10885  */
   10886 static int
   10887 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10888 {
   10889 	struct wm_softc *sc = device_private(dev);
   10890 	int rv;
   10891 
   10892 	if (sc->phy.acquire(sc) != 0) {
   10893 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10894 		return 0;
   10895 	}
   10896 
   10897 #ifdef DIAGNOSTIC
   10898 	if (reg > MII_ADDRMASK) {
   10899 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10900 		    __func__, sc->sc_phytype, reg);
   10901 		reg &= MII_ADDRMASK;
   10902 	}
   10903 #endif
   10904 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10905 
   10906 	sc->phy.release(sc);
   10907 	return rv;
   10908 }
   10909 
   10910 /*
   10911  * wm_gmii_82580_writereg:	[mii interface function]
   10912  *
   10913  *	Write a PHY register on the 82580 and I350.
   10914  * This could be handled by the PHY layer if we didn't have to lock the
    10915  * resource ...
   10916  */
   10917 static void
   10918 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10919 {
   10920 	struct wm_softc *sc = device_private(dev);
   10921 
   10922 	if (sc->phy.acquire(sc) != 0) {
   10923 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10924 		return;
   10925 	}
   10926 
   10927 #ifdef DIAGNOSTIC
   10928 	if (reg > MII_ADDRMASK) {
   10929 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10930 		    __func__, sc->sc_phytype, reg);
   10931 		reg &= MII_ADDRMASK;
   10932 	}
   10933 #endif
   10934 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10935 
   10936 	sc->phy.release(sc);
   10937 }
   10938 
   10939 /*
   10940  * wm_gmii_gs40g_readreg:	[mii interface function]
   10941  *
    10942  *	Read a PHY register on the I210 and I211.
    10943  * This could be handled by the PHY layer if we didn't have to lock the
    10944  * resource ...
   10945  */
   10946 static int
   10947 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10948 {
   10949 	struct wm_softc *sc = device_private(dev);
   10950 	int page, offset;
   10951 	int rv;
   10952 
   10953 	/* Acquire semaphore */
   10954 	if (sc->phy.acquire(sc)) {
   10955 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10956 		return 0;
   10957 	}
   10958 
   10959 	/* Page select */
   10960 	page = reg >> GS40G_PAGE_SHIFT;
   10961 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10962 
   10963 	/* Read reg */
   10964 	offset = reg & GS40G_OFFSET_MASK;
   10965 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10966 
   10967 	sc->phy.release(sc);
   10968 	return rv;
   10969 }
   10970 
   10971 /*
   10972  * wm_gmii_gs40g_writereg:	[mii interface function]
   10973  *
   10974  *	Write a PHY register on the I210 and I211.
   10975  * This could be handled by the PHY layer if we didn't have to lock the
    10976  * resource ...
   10977  */
   10978 static void
   10979 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10980 {
   10981 	struct wm_softc *sc = device_private(dev);
   10982 	int page, offset;
   10983 
   10984 	/* Acquire semaphore */
   10985 	if (sc->phy.acquire(sc)) {
   10986 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10987 		return;
   10988 	}
   10989 
   10990 	/* Page select */
   10991 	page = reg >> GS40G_PAGE_SHIFT;
   10992 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10993 
   10994 	/* Write reg */
   10995 	offset = reg & GS40G_OFFSET_MASK;
   10996 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10997 
   10998 	/* Release semaphore */
   10999 	sc->phy.release(sc);
   11000 }
   11001 
   11002 /*
   11003  * wm_gmii_statchg:	[mii interface function]
   11004  *
   11005  *	Callback from MII layer when media changes.
   11006  */
   11007 static void
   11008 wm_gmii_statchg(struct ifnet *ifp)
   11009 {
   11010 	struct wm_softc *sc = ifp->if_softc;
   11011 	struct mii_data *mii = &sc->sc_mii;
   11012 
   11013 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11014 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11015 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11016 
   11017 	/*
   11018 	 * Get flow control negotiation result.
   11019 	 */
   11020 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11021 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11022 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11023 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11024 	}
   11025 
   11026 	if (sc->sc_flowflags & IFM_FLOW) {
   11027 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11028 			sc->sc_ctrl |= CTRL_TFCE;
   11029 			sc->sc_fcrtl |= FCRTL_XONE;
   11030 		}
   11031 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11032 			sc->sc_ctrl |= CTRL_RFCE;
   11033 	}
   11034 
   11035 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11036 		DPRINTF(WM_DEBUG_LINK,
   11037 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11038 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11039 	} else {
   11040 		DPRINTF(WM_DEBUG_LINK,
   11041 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11042 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11043 	}
   11044 
   11045 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11046 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11047 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11048 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11049 	if (sc->sc_type == WM_T_80003) {
   11050 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11051 		case IFM_1000_T:
   11052 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11053 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11054 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11055 			break;
   11056 		default:
   11057 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11058 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11059 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11060 			break;
   11061 		}
   11062 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11063 	}
   11064 }
   11065 
   11066 /* kumeran related (80003, ICH* and PCH*) */
   11067 
   11068 /*
   11069  * wm_kmrn_readreg:
   11070  *
   11071  *	Read a kumeran register
   11072  */
   11073 static int
   11074 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11075 {
   11076 	int rv;
   11077 
   11078 	if (sc->sc_type == WM_T_80003)
   11079 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11080 	else
   11081 		rv = sc->phy.acquire(sc);
   11082 	if (rv != 0) {
   11083 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11084 		    __func__);
   11085 		return rv;
   11086 	}
   11087 
   11088 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11089 
   11090 	if (sc->sc_type == WM_T_80003)
   11091 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11092 	else
   11093 		sc->phy.release(sc);
   11094 
   11095 	return rv;
   11096 }
   11097 
   11098 static int
   11099 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11100 {
   11101 
   11102 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11103 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11104 	    KUMCTRLSTA_REN);
   11105 	CSR_WRITE_FLUSH(sc);
   11106 	delay(2);
   11107 
   11108 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11109 
   11110 	return 0;
   11111 }
   11112 
   11113 /*
   11114  * wm_kmrn_writereg:
   11115  *
   11116  *	Write a kumeran register
   11117  */
   11118 static int
   11119 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11120 {
   11121 	int rv;
   11122 
   11123 	if (sc->sc_type == WM_T_80003)
   11124 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11125 	else
   11126 		rv = sc->phy.acquire(sc);
   11127 	if (rv != 0) {
   11128 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11129 		    __func__);
   11130 		return rv;
   11131 	}
   11132 
   11133 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11134 
   11135 	if (sc->sc_type == WM_T_80003)
   11136 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11137 	else
   11138 		sc->phy.release(sc);
   11139 
   11140 	return rv;
   11141 }
   11142 
   11143 static int
   11144 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11145 {
   11146 
   11147 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11148 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11149 
   11150 	return 0;
   11151 }
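
/*
 * Illustrative sketch (not part of the driver): both kumeran accessors
 * above funnel through one 32-bit KUMCTRLSTA word; a read sets
 * KUMCTRLSTA_REN and the data comes back in the low 16 bits:
 */
#if 0
static uint32_t
example_kmrn_read_cmd(int reg)
{

	return ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN;
}
#endif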
   11152 
   11153 /* SGMII related */
   11154 
   11155 /*
   11156  * wm_sgmii_uses_mdio
   11157  *
   11158  * Check whether the transaction is to the internal PHY or the external
   11159  * MDIO interface. Return true if it's MDIO.
   11160  */
   11161 static bool
   11162 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11163 {
   11164 	uint32_t reg;
   11165 	bool ismdio = false;
   11166 
   11167 	switch (sc->sc_type) {
   11168 	case WM_T_82575:
   11169 	case WM_T_82576:
   11170 		reg = CSR_READ(sc, WMREG_MDIC);
   11171 		ismdio = ((reg & MDIC_DEST) != 0);
   11172 		break;
   11173 	case WM_T_82580:
   11174 	case WM_T_I350:
   11175 	case WM_T_I354:
   11176 	case WM_T_I210:
   11177 	case WM_T_I211:
   11178 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11179 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11180 		break;
   11181 	default:
   11182 		break;
   11183 	}
   11184 
   11185 	return ismdio;
   11186 }
   11187 
   11188 /*
   11189  * wm_sgmii_readreg:	[mii interface function]
   11190  *
   11191  *	Read a PHY register on the SGMII
   11192  * This could be handled by the PHY layer if we didn't have to lock the
    11193  * resource ...
   11194  */
   11195 static int
   11196 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11197 {
   11198 	struct wm_softc *sc = device_private(dev);
   11199 	uint32_t i2ccmd;
   11200 	int i, rv;
   11201 
   11202 	if (sc->phy.acquire(sc)) {
   11203 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11204 		return 0;
   11205 	}
   11206 
   11207 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11208 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11209 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11210 
   11211 	/* Poll the ready bit */
   11212 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11213 		delay(50);
   11214 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11215 		if (i2ccmd & I2CCMD_READY)
   11216 			break;
   11217 	}
   11218 	if ((i2ccmd & I2CCMD_READY) == 0)
   11219 		device_printf(dev, "I2CCMD Read did not complete\n");
   11220 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11221 		device_printf(dev, "I2CCMD Error bit set\n");
   11222 
   11223 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11224 
   11225 	sc->phy.release(sc);
   11226 	return rv;
   11227 }
   11228 
   11229 /*
   11230  * wm_sgmii_writereg:	[mii interface function]
   11231  *
   11232  *	Write a PHY register on the SGMII.
   11233  * This could be handled by the PHY layer if we didn't have to lock the
    11234  * resource ...
   11235  */
   11236 static void
   11237 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11238 {
   11239 	struct wm_softc *sc = device_private(dev);
   11240 	uint32_t i2ccmd;
   11241 	int i;
   11242 	int swapdata;
   11243 
   11244 	if (sc->phy.acquire(sc) != 0) {
   11245 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11246 		return;
   11247 	}
   11248 	/* Swap the data bytes for the I2C interface */
   11249 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11250 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11251 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11252 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11253 
   11254 	/* Poll the ready bit */
   11255 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11256 		delay(50);
   11257 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11258 		if (i2ccmd & I2CCMD_READY)
   11259 			break;
   11260 	}
   11261 	if ((i2ccmd & I2CCMD_READY) == 0)
   11262 		device_printf(dev, "I2CCMD Write did not complete\n");
   11263 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11264 		device_printf(dev, "I2CCMD Error bit set\n");
   11265 
   11266 	sc->phy.release(sc);
   11267 }
   11268 
   11269 /* TBI related */
   11270 
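          /*
           * wm_tbi_havesignal:
           *
           *	Return true if the optics detect a signal, based on the
           *	CTRL_SWDPIN(1) bit in the given CTRL value.
           */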
   11271 static bool
   11272 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11273 {
   11274 	bool sig;
   11275 
   11276 	sig = ctrl & CTRL_SWDPIN(1);
   11277 
   11278 	/*
   11279 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11280 	 * detect a signal, 1 if they don't.
   11281 	 */
   11282 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11283 		sig = !sig;
   11284 
   11285 	return sig;
   11286 }
   11287 
   11288 /*
   11289  * wm_tbi_mediainit:
   11290  *
   11291  *	Initialize media for use on 1000BASE-X devices.
   11292  */
   11293 static void
   11294 wm_tbi_mediainit(struct wm_softc *sc)
   11295 {
   11296 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11297 	const char *sep = "";
   11298 
   11299 	if (sc->sc_type < WM_T_82543)
   11300 		sc->sc_tipg = TIPG_WM_DFLT;
   11301 	else
   11302 		sc->sc_tipg = TIPG_LG_DFLT;
   11303 
   11304 	sc->sc_tbi_serdes_anegticks = 5;
   11305 
   11306 	/* Initialize our media structures */
   11307 	sc->sc_mii.mii_ifp = ifp;
   11308 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11309 
   11310 	if ((sc->sc_type >= WM_T_82575)
   11311 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11312 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11313 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11314 	else
   11315 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11316 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11317 
   11318 	/*
   11319 	 * SWD Pins:
   11320 	 *
   11321 	 *	0 = Link LED (output)
   11322 	 *	1 = Loss Of Signal (input)
   11323 	 */
   11324 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11325 
   11326 	/* XXX Perhaps this is only for TBI */
   11327 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11328 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11329 
   11330 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11331 		sc->sc_ctrl &= ~CTRL_LRST;
   11332 
   11333 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11334 
   11335 #define	ADD(ss, mm, dd)							\
   11336 do {									\
   11337 	aprint_normal("%s%s", sep, ss);					\
   11338 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11339 	sep = ", ";							\
   11340 } while (/*CONSTCOND*/0)
   11341 
   11342 	aprint_normal_dev(sc->sc_dev, "");
   11343 
   11344 	if (sc->sc_type == WM_T_I354) {
   11345 		uint32_t status;
   11346 
   11347 		status = CSR_READ(sc, WMREG_STATUS);
   11348 		if (((status & STATUS_2P5_SKU) != 0)
   11349 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11350 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11351 		} else
    11352 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11353 	} else if (sc->sc_type == WM_T_82545) {
   11354 		/* Only 82545 is LX (XXX except SFP) */
   11355 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11356 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11357 	} else {
   11358 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11359 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11360 	}
   11361 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11362 	aprint_normal("\n");
   11363 
   11364 #undef ADD
   11365 
   11366 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11367 }
   11368 
   11369 /*
   11370  * wm_tbi_mediachange:	[ifmedia interface function]
   11371  *
   11372  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11373  */
   11374 static int
   11375 wm_tbi_mediachange(struct ifnet *ifp)
   11376 {
   11377 	struct wm_softc *sc = ifp->if_softc;
   11378 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11379 	uint32_t status, ctrl;
   11380 	bool signal;
   11381 	int i;
   11382 
   11383 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11384 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11385 		/* XXX need some work for >= 82571 and < 82575 */
   11386 		if (sc->sc_type < WM_T_82575)
   11387 			return 0;
   11388 	}
   11389 
   11390 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11391 	    || (sc->sc_type >= WM_T_82575))
   11392 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11393 
   11394 	sc->sc_ctrl &= ~CTRL_LRST;
   11395 	sc->sc_txcw = TXCW_ANE;
   11396 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11397 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11398 	else if (ife->ifm_media & IFM_FDX)
   11399 		sc->sc_txcw |= TXCW_FD;
   11400 	else
   11401 		sc->sc_txcw |= TXCW_HD;
   11402 
   11403 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11404 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11405 
    11406 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11407 		device_xname(sc->sc_dev), sc->sc_txcw));
   11408 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11409 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11410 	CSR_WRITE_FLUSH(sc);
   11411 	delay(1000);
   11412 
    11413 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11414 	signal = wm_tbi_havesignal(sc, ctrl);
   11415 
   11416 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11417 		signal));
   11418 
   11419 	if (signal) {
   11420 		/* Have signal; wait for the link to come up. */
   11421 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11422 			delay(10000);
   11423 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11424 				break;
   11425 		}
   11426 
    11427 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11428 			device_xname(sc->sc_dev), i));
   11429 
   11430 		status = CSR_READ(sc, WMREG_STATUS);
   11431 		DPRINTF(WM_DEBUG_LINK,
   11432 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11433 			device_xname(sc->sc_dev), status, STATUS_LU));
   11434 		if (status & STATUS_LU) {
   11435 			/* Link is up. */
   11436 			DPRINTF(WM_DEBUG_LINK,
   11437 			    ("%s: LINK: set media -> link up %s\n",
   11438 				device_xname(sc->sc_dev),
   11439 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11440 
   11441 			/*
    11442 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    11443 			 * automatically, so we should keep sc->sc_ctrl in sync.
   11444 			 */
   11445 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11446 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11447 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11448 			if (status & STATUS_FD)
   11449 				sc->sc_tctl |=
   11450 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11451 			else
   11452 				sc->sc_tctl |=
   11453 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11454 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11455 				sc->sc_fcrtl |= FCRTL_XONE;
   11456 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11457 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11458 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11459 			sc->sc_tbi_linkup = 1;
   11460 		} else {
   11461 			if (i == WM_LINKUP_TIMEOUT)
   11462 				wm_check_for_link(sc);
   11463 			/* Link is down. */
   11464 			DPRINTF(WM_DEBUG_LINK,
   11465 			    ("%s: LINK: set media -> link down\n",
   11466 				device_xname(sc->sc_dev)));
   11467 			sc->sc_tbi_linkup = 0;
   11468 		}
   11469 	} else {
   11470 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11471 			device_xname(sc->sc_dev)));
   11472 		sc->sc_tbi_linkup = 0;
   11473 	}
   11474 
   11475 	wm_tbi_serdes_set_linkled(sc);
   11476 
   11477 	return 0;
   11478 }
   11479 
   11480 /*
   11481  * wm_tbi_mediastatus:	[ifmedia interface function]
   11482  *
   11483  *	Get the current interface media status on a 1000BASE-X device.
   11484  */
   11485 static void
   11486 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11487 {
   11488 	struct wm_softc *sc = ifp->if_softc;
   11489 	uint32_t ctrl, status;
   11490 
   11491 	ifmr->ifm_status = IFM_AVALID;
   11492 	ifmr->ifm_active = IFM_ETHER;
   11493 
   11494 	status = CSR_READ(sc, WMREG_STATUS);
   11495 	if ((status & STATUS_LU) == 0) {
   11496 		ifmr->ifm_active |= IFM_NONE;
   11497 		return;
   11498 	}
   11499 
   11500 	ifmr->ifm_status |= IFM_ACTIVE;
   11501 	/* Only 82545 is LX */
   11502 	if (sc->sc_type == WM_T_82545)
   11503 		ifmr->ifm_active |= IFM_1000_LX;
   11504 	else
   11505 		ifmr->ifm_active |= IFM_1000_SX;
   11506 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11507 		ifmr->ifm_active |= IFM_FDX;
   11508 	else
   11509 		ifmr->ifm_active |= IFM_HDX;
   11510 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11511 	if (ctrl & CTRL_RFCE)
   11512 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11513 	if (ctrl & CTRL_TFCE)
   11514 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11515 }
   11516 
   11517 /* XXX TBI only */
   11518 static int
   11519 wm_check_for_link(struct wm_softc *sc)
   11520 {
   11521 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11522 	uint32_t rxcw;
   11523 	uint32_t ctrl;
   11524 	uint32_t status;
   11525 	bool signal;
   11526 
   11527 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11528 		device_xname(sc->sc_dev), __func__));
   11529 
   11530 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11531 		/* XXX need some work for >= 82571 */
   11532 		if (sc->sc_type >= WM_T_82571) {
   11533 			sc->sc_tbi_linkup = 1;
   11534 			return 0;
   11535 		}
   11536 	}
   11537 
   11538 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11539 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11540 	status = CSR_READ(sc, WMREG_STATUS);
   11541 	signal = wm_tbi_havesignal(sc, ctrl);
   11542 
   11543 	DPRINTF(WM_DEBUG_LINK,
   11544 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11545 		device_xname(sc->sc_dev), __func__, signal,
   11546 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11547 
   11548 	/*
   11549 	 * SWDPIN   LU RXCW
   11550 	 *	0    0	  0
   11551 	 *	0    0	  1	(should not happen)
   11552 	 *	0    1	  0	(should not happen)
   11553 	 *	0    1	  1	(should not happen)
    11554 	 *	1    0	  0	Disable autonegotiation and force link up
    11555 	 *	1    0	  1	Got /C/ but no link up yet
    11556 	 *	1    1	  0	(link up)
    11557 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   11558 	 *
   11559 	 */
   11560 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11561 		DPRINTF(WM_DEBUG_LINK,
   11562 		    ("%s: %s: force linkup and fullduplex\n",
   11563 			device_xname(sc->sc_dev), __func__));
   11564 		sc->sc_tbi_linkup = 0;
   11565 		/* Disable auto-negotiation in the TXCW register */
   11566 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11567 
   11568 		/*
   11569 		 * Force link-up and also force full-duplex.
   11570 		 *
    11571 		 * NOTE: The hardware has updated TFCE and RFCE in CTRL
    11572 		 * automatically, so we should keep sc->sc_ctrl in sync.
   11573 		 */
   11574 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11575 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11576 	} else if (((status & STATUS_LU) != 0)
   11577 	    && ((rxcw & RXCW_C) != 0)
   11578 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11579 		sc->sc_tbi_linkup = 1;
   11580 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11581 			device_xname(sc->sc_dev),
   11582 			__func__));
   11583 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11584 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11585 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11586 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11587 			device_xname(sc->sc_dev), __func__));
   11588 	} else {
   11589 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11590 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11591 			status));
   11592 	}
   11593 
   11594 	return 0;
   11595 }
   11596 
   11597 /*
   11598  * wm_tbi_tick:
   11599  *
   11600  *	Check the link on TBI devices.
   11601  *	This function acts as mii_tick().
   11602  */
   11603 static void
   11604 wm_tbi_tick(struct wm_softc *sc)
   11605 {
   11606 	struct mii_data *mii = &sc->sc_mii;
   11607 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11608 	uint32_t status;
   11609 
   11610 	KASSERT(WM_CORE_LOCKED(sc));
   11611 
   11612 	status = CSR_READ(sc, WMREG_STATUS);
   11613 
   11614 	/* XXX is this needed? */
   11615 	(void)CSR_READ(sc, WMREG_RXCW);
   11616 	(void)CSR_READ(sc, WMREG_CTRL);
   11617 
   11618 	/* set link status */
   11619 	if ((status & STATUS_LU) == 0) {
   11620 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11621 			device_xname(sc->sc_dev)));
   11622 		sc->sc_tbi_linkup = 0;
   11623 	} else if (sc->sc_tbi_linkup == 0) {
   11624 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11625 			device_xname(sc->sc_dev),
   11626 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11627 		sc->sc_tbi_linkup = 1;
   11628 		sc->sc_tbi_serdes_ticks = 0;
   11629 	}
   11630 
   11631 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11632 		goto setled;
   11633 
   11634 	if ((status & STATUS_LU) == 0) {
   11635 		sc->sc_tbi_linkup = 0;
   11636 		/* If the timer expired, retry autonegotiation */
   11637 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11638 		    && (++sc->sc_tbi_serdes_ticks
   11639 			>= sc->sc_tbi_serdes_anegticks)) {
   11640 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11641 			sc->sc_tbi_serdes_ticks = 0;
   11642 			/*
   11643 			 * Reset the link, and let autonegotiation do
   11644 			 * its thing
   11645 			 */
   11646 			sc->sc_ctrl |= CTRL_LRST;
   11647 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11648 			CSR_WRITE_FLUSH(sc);
   11649 			delay(1000);
   11650 			sc->sc_ctrl &= ~CTRL_LRST;
   11651 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11652 			CSR_WRITE_FLUSH(sc);
   11653 			delay(1000);
   11654 			CSR_WRITE(sc, WMREG_TXCW,
   11655 			    sc->sc_txcw & ~TXCW_ANE);
   11656 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11657 		}
   11658 	}
   11659 
   11660 setled:
   11661 	wm_tbi_serdes_set_linkled(sc);
   11662 }
   11663 
   11664 /* SERDES related */
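          /*
           * wm_serdes_power_up_link_82575:
           *
           *	Power up the SERDES/SGMII link by enabling the PCS and
           *	deasserting SDP3. Does nothing if the device uses neither
           *	SERDES nor SGMII.
           */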
   11665 static void
   11666 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11667 {
   11668 	uint32_t reg;
   11669 
   11670 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11671 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11672 		return;
   11673 
   11674 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11675 	reg |= PCS_CFG_PCS_EN;
   11676 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11677 
   11678 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11679 	reg &= ~CTRL_EXT_SWDPIN(3);
   11680 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11681 	CSR_WRITE_FLUSH(sc);
   11682 }
   11683 
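          /*
           * wm_serdes_mediachange:	[ifmedia interface function]
           *
           *	Set hardware to newly-selected media on a SERDES device.
           */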
   11684 static int
   11685 wm_serdes_mediachange(struct ifnet *ifp)
   11686 {
   11687 	struct wm_softc *sc = ifp->if_softc;
   11688 	bool pcs_autoneg = true; /* XXX */
   11689 	uint32_t ctrl_ext, pcs_lctl, reg;
   11690 
   11691 	/* XXX Currently, this function is not called on 8257[12] */
   11692 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11693 	    || (sc->sc_type >= WM_T_82575))
   11694 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11695 
   11696 	wm_serdes_power_up_link_82575(sc);
   11697 
   11698 	sc->sc_ctrl |= CTRL_SLU;
   11699 
   11700 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11701 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11702 
   11703 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11704 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11705 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11706 	case CTRL_EXT_LINK_MODE_SGMII:
   11707 		pcs_autoneg = true;
   11708 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11709 		break;
   11710 	case CTRL_EXT_LINK_MODE_1000KX:
   11711 		pcs_autoneg = false;
   11712 		/* FALLTHROUGH */
   11713 	default:
   11714 		if ((sc->sc_type == WM_T_82575)
   11715 		    || (sc->sc_type == WM_T_82576)) {
   11716 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11717 				pcs_autoneg = false;
   11718 		}
   11719 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11720 		    | CTRL_FRCFDX;
   11721 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11722 	}
   11723 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11724 
   11725 	if (pcs_autoneg) {
   11726 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11727 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11728 
   11729 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11730 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11731 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11732 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11733 	} else
   11734 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11735 
   11736 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11737 
   11739 	return 0;
   11740 }
   11741 
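          /*
           * wm_serdes_mediastatus:	[ifmedia interface function]
           *
           *	Get the current interface media status on a SERDES device.
           */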
   11742 static void
   11743 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11744 {
   11745 	struct wm_softc *sc = ifp->if_softc;
   11746 	struct mii_data *mii = &sc->sc_mii;
   11747 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11748 	uint32_t pcs_adv, pcs_lpab, reg;
   11749 
   11750 	ifmr->ifm_status = IFM_AVALID;
   11751 	ifmr->ifm_active = IFM_ETHER;
   11752 
   11753 	/* Check PCS */
   11754 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11755 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11756 		ifmr->ifm_active |= IFM_NONE;
   11757 		sc->sc_tbi_linkup = 0;
   11758 		goto setled;
   11759 	}
   11760 
   11761 	sc->sc_tbi_linkup = 1;
   11762 	ifmr->ifm_status |= IFM_ACTIVE;
   11763 	if (sc->sc_type == WM_T_I354) {
   11764 		uint32_t status;
   11765 
   11766 		status = CSR_READ(sc, WMREG_STATUS);
   11767 		if (((status & STATUS_2P5_SKU) != 0)
   11768 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11769 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11770 		} else
   11771 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11772 	} else {
   11773 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11774 		case PCS_LSTS_SPEED_10:
   11775 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11776 			break;
   11777 		case PCS_LSTS_SPEED_100:
   11778 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11779 			break;
   11780 		case PCS_LSTS_SPEED_1000:
   11781 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11782 			break;
   11783 		default:
   11784 			device_printf(sc->sc_dev, "Unknown speed\n");
   11785 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11786 			break;
   11787 		}
   11788 	}
   11789 	if ((reg & PCS_LSTS_FDX) != 0)
   11790 		ifmr->ifm_active |= IFM_FDX;
   11791 	else
   11792 		ifmr->ifm_active |= IFM_HDX;
   11793 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11794 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11795 		/* Check flow */
   11796 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11797 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11798 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11799 			goto setled;
   11800 		}
   11801 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11802 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11803 		DPRINTF(WM_DEBUG_LINK,
   11804 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11805 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11806 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11807 			mii->mii_media_active |= IFM_FLOW
   11808 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11809 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11810 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11811 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11812 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11813 			mii->mii_media_active |= IFM_FLOW
   11814 			    | IFM_ETH_TXPAUSE;
   11815 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11816 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11817 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11818 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11819 			mii->mii_media_active |= IFM_FLOW
   11820 			    | IFM_ETH_RXPAUSE;
   11821 		}
   11822 	}
   11823 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11824 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11825 setled:
   11826 	wm_tbi_serdes_set_linkled(sc);
   11827 }
   11828 
   11829 /*
   11830  * wm_serdes_tick:
   11831  *
   11832  *	Check the link on serdes devices.
   11833  */
   11834 static void
   11835 wm_serdes_tick(struct wm_softc *sc)
   11836 {
   11837 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11838 	struct mii_data *mii = &sc->sc_mii;
   11839 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11840 	uint32_t reg;
   11841 
   11842 	KASSERT(WM_CORE_LOCKED(sc));
   11843 
   11844 	mii->mii_media_status = IFM_AVALID;
   11845 	mii->mii_media_active = IFM_ETHER;
   11846 
   11847 	/* Check PCS */
   11848 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11849 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11850 		mii->mii_media_status |= IFM_ACTIVE;
   11851 		sc->sc_tbi_linkup = 1;
   11852 		sc->sc_tbi_serdes_ticks = 0;
   11853 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11854 		if ((reg & PCS_LSTS_FDX) != 0)
   11855 			mii->mii_media_active |= IFM_FDX;
   11856 		else
   11857 			mii->mii_media_active |= IFM_HDX;
   11858 	} else {
   11859 		mii->mii_media_status |= IFM_NONE;
   11860 		sc->sc_tbi_linkup = 0;
   11861 		/* If the timer expired, retry autonegotiation */
   11862 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11863 		    && (++sc->sc_tbi_serdes_ticks
   11864 			>= sc->sc_tbi_serdes_anegticks)) {
   11865 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11866 			sc->sc_tbi_serdes_ticks = 0;
   11867 			/* XXX */
   11868 			wm_serdes_mediachange(ifp);
   11869 		}
   11870 	}
   11871 
   11872 	wm_tbi_serdes_set_linkled(sc);
   11873 }
   11874 
   11875 /* SFP related */
   11876 
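          /*
           * wm_sfp_read_data_byte:
           *
           *	Read one byte from the SFP module's EEPROM through the I2C
           *	command register. Returns 0 on success, -1 on timeout or
           *	I2C error.
           */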
   11877 static int
   11878 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11879 {
   11880 	uint32_t i2ccmd;
   11881 	int i;
   11882 
   11883 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11884 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11885 
   11886 	/* Poll the ready bit */
   11887 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11888 		delay(50);
   11889 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11890 		if (i2ccmd & I2CCMD_READY)
   11891 			break;
   11892 	}
   11893 	if ((i2ccmd & I2CCMD_READY) == 0)
   11894 		return -1;
   11895 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11896 		return -1;
   11897 
   11898 	*data = i2ccmd & 0x00ff;
   11899 
   11900 	return 0;
   11901 }
   11902 
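          /*
           * wm_sfp_get_media_type:
           *
           *	Determine the media type (SERDES, copper/SGMII or unknown)
           *	from the SFP module's identifier and Ethernet compliance
           *	code bytes.
           */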
   11903 static uint32_t
   11904 wm_sfp_get_media_type(struct wm_softc *sc)
   11905 {
   11906 	uint32_t ctrl_ext;
   11907 	uint8_t val = 0;
   11908 	int timeout = 3;
   11909 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11910 	int rv = -1;
   11911 
   11912 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11913 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11914 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11915 	CSR_WRITE_FLUSH(sc);
   11916 
   11917 	/* Read SFP module data */
   11918 	while (timeout) {
   11919 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11920 		if (rv == 0)
   11921 			break;
   11922 		delay(100*1000); /* XXX too big */
   11923 		timeout--;
   11924 	}
   11925 	if (rv != 0)
   11926 		goto out;
   11927 	switch (val) {
   11928 	case SFF_SFP_ID_SFF:
   11929 		aprint_normal_dev(sc->sc_dev,
   11930 		    "Module/Connector soldered to board\n");
   11931 		break;
   11932 	case SFF_SFP_ID_SFP:
   11933 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11934 		break;
   11935 	case SFF_SFP_ID_UNKNOWN:
   11936 		goto out;
   11937 	default:
   11938 		break;
   11939 	}
   11940 
   11941 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    11942 	if (rv != 0)
    11943 		goto out;
   11945 
   11946 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11947 		mediatype = WM_MEDIATYPE_SERDES;
   11948 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11949 		sc->sc_flags |= WM_F_SGMII;
   11950 		mediatype = WM_MEDIATYPE_COPPER;
   11951 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11952 		sc->sc_flags |= WM_F_SGMII;
   11953 		mediatype = WM_MEDIATYPE_SERDES;
   11954 	}
   11955 
   11956 out:
   11957 	/* Restore I2C interface setting */
   11958 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11959 
   11960 	return mediatype;
   11961 }
   11962 
   11963 /*
   11964  * NVM related.
   11965  * Microwire, SPI (w/wo EERD) and Flash.
   11966  */
   11967 
   11968 /* Both spi and uwire */
   11969 
   11970 /*
   11971  * wm_eeprom_sendbits:
   11972  *
   11973  *	Send a series of bits to the EEPROM.
   11974  */
   11975 static void
   11976 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11977 {
   11978 	uint32_t reg;
   11979 	int x;
   11980 
   11981 	reg = CSR_READ(sc, WMREG_EECD);
   11982 
   11983 	for (x = nbits; x > 0; x--) {
   11984 		if (bits & (1U << (x - 1)))
   11985 			reg |= EECD_DI;
   11986 		else
   11987 			reg &= ~EECD_DI;
   11988 		CSR_WRITE(sc, WMREG_EECD, reg);
   11989 		CSR_WRITE_FLUSH(sc);
   11990 		delay(2);
   11991 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11992 		CSR_WRITE_FLUSH(sc);
   11993 		delay(2);
   11994 		CSR_WRITE(sc, WMREG_EECD, reg);
   11995 		CSR_WRITE_FLUSH(sc);
   11996 		delay(2);
   11997 	}
   11998 }
   11999 
   12000 /*
   12001  * wm_eeprom_recvbits:
   12002  *
   12003  *	Receive a series of bits from the EEPROM.
   12004  */
   12005 static void
   12006 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12007 {
   12008 	uint32_t reg, val;
   12009 	int x;
   12010 
   12011 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12012 
   12013 	val = 0;
   12014 	for (x = nbits; x > 0; x--) {
   12015 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12016 		CSR_WRITE_FLUSH(sc);
   12017 		delay(2);
   12018 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12019 			val |= (1U << (x - 1));
   12020 		CSR_WRITE(sc, WMREG_EECD, reg);
   12021 		CSR_WRITE_FLUSH(sc);
   12022 		delay(2);
   12023 	}
   12024 	*valp = val;
   12025 }
   12026 
   12027 /* Microwire */
   12028 
   12029 /*
   12030  * wm_nvm_read_uwire:
   12031  *
   12032  *	Read a word from the EEPROM using the MicroWire protocol.
   12033  */
   12034 static int
   12035 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12036 {
   12037 	uint32_t reg, val;
   12038 	int i;
   12039 
   12040 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12041 		device_xname(sc->sc_dev), __func__));
   12042 
   12043 	if (sc->nvm.acquire(sc) != 0)
   12044 		return -1;
   12045 
   12046 	for (i = 0; i < wordcnt; i++) {
   12047 		/* Clear SK and DI. */
   12048 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12049 		CSR_WRITE(sc, WMREG_EECD, reg);
   12050 
   12051 		/*
   12052 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12053 		 * and Xen.
   12054 		 *
   12055 		 * We use this workaround only for 82540 because qemu's
    12056 		 * e1000 acts as an 82540.
   12057 		 */
   12058 		if (sc->sc_type == WM_T_82540) {
   12059 			reg |= EECD_SK;
   12060 			CSR_WRITE(sc, WMREG_EECD, reg);
   12061 			reg &= ~EECD_SK;
   12062 			CSR_WRITE(sc, WMREG_EECD, reg);
   12063 			CSR_WRITE_FLUSH(sc);
   12064 			delay(2);
   12065 		}
   12066 		/* XXX: end of workaround */
   12067 
   12068 		/* Set CHIP SELECT. */
   12069 		reg |= EECD_CS;
   12070 		CSR_WRITE(sc, WMREG_EECD, reg);
   12071 		CSR_WRITE_FLUSH(sc);
   12072 		delay(2);
   12073 
   12074 		/* Shift in the READ command. */
   12075 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12076 
   12077 		/* Shift in address. */
   12078 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12079 
   12080 		/* Shift out the data. */
   12081 		wm_eeprom_recvbits(sc, &val, 16);
   12082 		data[i] = val & 0xffff;
   12083 
   12084 		/* Clear CHIP SELECT. */
   12085 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12086 		CSR_WRITE(sc, WMREG_EECD, reg);
   12087 		CSR_WRITE_FLUSH(sc);
   12088 		delay(2);
   12089 	}
   12090 
   12091 	sc->nvm.release(sc);
   12092 	return 0;
   12093 }
   12094 
   12095 /* SPI */
   12096 
   12097 /*
   12098  * Set SPI and FLASH related information from the EECD register.
   12099  * For 82541 and 82547, the word size is taken from EEPROM.
   12100  */
   12101 static int
   12102 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12103 {
   12104 	int size;
   12105 	uint32_t reg;
   12106 	uint16_t data;
   12107 
   12108 	reg = CSR_READ(sc, WMREG_EECD);
   12109 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12110 
   12111 	/* Read the size of NVM from EECD by default */
   12112 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12113 	switch (sc->sc_type) {
   12114 	case WM_T_82541:
   12115 	case WM_T_82541_2:
   12116 	case WM_T_82547:
   12117 	case WM_T_82547_2:
   12118 		/* Set dummy value to access EEPROM */
   12119 		sc->sc_nvm_wordsize = 64;
   12120 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12121 			aprint_error_dev(sc->sc_dev,
   12122 			    "%s: failed to read EEPROM size\n", __func__);
   12123 		}
   12124 		reg = data;
   12125 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12126 		if (size == 0)
   12127 			size = 6; /* 64 word size */
   12128 		else
   12129 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12130 		break;
   12131 	case WM_T_80003:
   12132 	case WM_T_82571:
   12133 	case WM_T_82572:
   12134 	case WM_T_82573: /* SPI case */
   12135 	case WM_T_82574: /* SPI case */
   12136 	case WM_T_82583: /* SPI case */
   12137 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12138 		if (size > 14)
   12139 			size = 14;
   12140 		break;
   12141 	case WM_T_82575:
   12142 	case WM_T_82576:
   12143 	case WM_T_82580:
   12144 	case WM_T_I350:
   12145 	case WM_T_I354:
   12146 	case WM_T_I210:
   12147 	case WM_T_I211:
   12148 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12149 		if (size > 15)
   12150 			size = 15;
   12151 		break;
   12152 	default:
   12153 		aprint_error_dev(sc->sc_dev,
   12154 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12155 		return -1;
   12156 		break;
   12157 	}
   12158 
   12159 	sc->sc_nvm_wordsize = 1 << size;
   12160 
   12161 	return 0;
   12162 }
   12163 
   12164 /*
   12165  * wm_nvm_ready_spi:
   12166  *
   12167  *	Wait for a SPI EEPROM to be ready for commands.
   12168  */
   12169 static int
   12170 wm_nvm_ready_spi(struct wm_softc *sc)
   12171 {
   12172 	uint32_t val;
   12173 	int usec;
   12174 
   12175 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12176 		device_xname(sc->sc_dev), __func__));
   12177 
   12178 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12179 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12180 		wm_eeprom_recvbits(sc, &val, 8);
   12181 		if ((val & SPI_SR_RDY) == 0)
   12182 			break;
   12183 	}
   12184 	if (usec >= SPI_MAX_RETRIES) {
    12185 		aprint_error_dev(sc->sc_dev,
    12186 		    "EEPROM failed to become ready\n");
   12186 		return -1;
   12187 	}
   12188 	return 0;
   12189 }
   12190 
   12191 /*
   12192  * wm_nvm_read_spi:
   12193  *
    12194  *	Read a word from the EEPROM using the SPI protocol.
   12195  */
   12196 static int
   12197 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12198 {
   12199 	uint32_t reg, val;
   12200 	int i;
   12201 	uint8_t opc;
   12202 	int rv = 0;
   12203 
   12204 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12205 		device_xname(sc->sc_dev), __func__));
   12206 
   12207 	if (sc->nvm.acquire(sc) != 0)
   12208 		return -1;
   12209 
   12210 	/* Clear SK and CS. */
   12211 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12212 	CSR_WRITE(sc, WMREG_EECD, reg);
   12213 	CSR_WRITE_FLUSH(sc);
   12214 	delay(2);
   12215 
   12216 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12217 		goto out;
   12218 
   12219 	/* Toggle CS to flush commands. */
   12220 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12221 	CSR_WRITE_FLUSH(sc);
   12222 	delay(2);
   12223 	CSR_WRITE(sc, WMREG_EECD, reg);
   12224 	CSR_WRITE_FLUSH(sc);
   12225 	delay(2);
   12226 
   12227 	opc = SPI_OPC_READ;
   12228 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12229 		opc |= SPI_OPC_A8;
   12230 
   12231 	wm_eeprom_sendbits(sc, opc, 8);
   12232 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12233 
   12234 	for (i = 0; i < wordcnt; i++) {
   12235 		wm_eeprom_recvbits(sc, &val, 16);
   12236 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12237 	}
   12238 
   12239 	/* Raise CS and clear SK. */
   12240 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12241 	CSR_WRITE(sc, WMREG_EECD, reg);
   12242 	CSR_WRITE_FLUSH(sc);
   12243 	delay(2);
   12244 
   12245 out:
   12246 	sc->nvm.release(sc);
   12247 	return rv;
   12248 }
   12249 
    12250 /* Reading via the EERD register */
   12251 
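          /*
           * wm_poll_eerd_eewr_done:
           *
           *	Poll the register selected by "rw" (EERD or EEWR) until the
           *	DONE bit is set. Returns 0 on success, -1 on timeout.
           */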
   12252 static int
   12253 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12254 {
   12255 	uint32_t attempts = 100000;
   12256 	uint32_t i, reg = 0;
   12257 	int32_t done = -1;
   12258 
   12259 	for (i = 0; i < attempts; i++) {
   12260 		reg = CSR_READ(sc, rw);
   12261 
   12262 		if (reg & EERD_DONE) {
   12263 			done = 0;
   12264 			break;
   12265 		}
   12266 		delay(5);
   12267 	}
   12268 
   12269 	return done;
   12270 }
   12271 
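          /*
           * wm_nvm_read_eerd:
           *
           *	Read words from the NVM using the EERD (EEPROM Read)
           *	register.
           */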
   12272 static int
   12273 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12274 {
   12275 	int i, eerd = 0;
   12276 	int rv = 0;
   12277 
   12278 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12279 		device_xname(sc->sc_dev), __func__));
   12280 
   12281 	if (sc->nvm.acquire(sc) != 0)
   12282 		return -1;
   12283 
   12284 	for (i = 0; i < wordcnt; i++) {
   12285 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12286 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12287 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12288 		if (rv != 0) {
   12289 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12290 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12291 			break;
   12292 		}
   12293 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12294 	}
   12295 
   12296 	sc->nvm.release(sc);
   12297 	return rv;
   12298 }
   12299 
   12300 /* Flash */
   12301 
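          /*
           * wm_nvm_valid_bank_detect_ich8lan:
           *
           *	Detect which flash bank (0 or 1) holds the valid NVM image
           *	by checking each bank's signature byte. On ICH8/ICH9 the
           *	EECD register is consulted first.
           */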
   12302 static int
   12303 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12304 {
   12305 	uint32_t eecd;
   12306 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12307 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12308 	uint32_t nvm_dword = 0;
   12309 	uint8_t sig_byte = 0;
   12310 	int rv;
   12311 
   12312 	switch (sc->sc_type) {
   12313 	case WM_T_PCH_SPT:
   12314 	case WM_T_PCH_CNP:
   12315 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12316 		act_offset = ICH_NVM_SIG_WORD * 2;
   12317 
    12318 		/* Set bank to 0 in case the flash read fails. */
   12319 		*bank = 0;
   12320 
   12321 		/* Check bank 0 */
   12322 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12323 		if (rv != 0)
   12324 			return rv;
   12325 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12326 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12327 			*bank = 0;
   12328 			return 0;
   12329 		}
   12330 
   12331 		/* Check bank 1 */
   12332 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12333 		    &nvm_dword);
   12334 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12335 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12336 			*bank = 1;
   12337 			return 0;
   12338 		}
   12339 		aprint_error_dev(sc->sc_dev,
   12340 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12341 		return -1;
   12342 	case WM_T_ICH8:
   12343 	case WM_T_ICH9:
   12344 		eecd = CSR_READ(sc, WMREG_EECD);
   12345 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12346 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12347 			return 0;
   12348 		}
   12349 		/* FALLTHROUGH */
   12350 	default:
   12351 		/* Default to 0 */
   12352 		*bank = 0;
   12353 
   12354 		/* Check bank 0 */
   12355 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12356 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12357 			*bank = 0;
   12358 			return 0;
   12359 		}
   12360 
   12361 		/* Check bank 1 */
   12362 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12363 		    &sig_byte);
   12364 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12365 			*bank = 1;
   12366 			return 0;
   12367 		}
   12368 	}
   12369 
   12370 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12371 		device_xname(sc->sc_dev)));
   12372 	return -1;
   12373 }
   12374 
   12375 /******************************************************************************
   12376  * This function does initial flash setup so that a new read/write/erase cycle
   12377  * can be started.
   12378  *
   12379  * sc - The pointer to the hw structure
   12380  ****************************************************************************/
   12381 static int32_t
   12382 wm_ich8_cycle_init(struct wm_softc *sc)
   12383 {
   12384 	uint16_t hsfsts;
   12385 	int32_t error = 1;
   12386 	int32_t i     = 0;
   12387 
   12388 	if (sc->sc_type >= WM_T_PCH_SPT)
   12389 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12390 	else
   12391 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12392 
    12393 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12394 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12395 		return error;
   12396 
    12397 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12399 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12400 
   12401 	if (sc->sc_type >= WM_T_PCH_SPT)
   12402 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12403 	else
   12404 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12405 
   12406 	/*
    12407 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12408 	 * check against before starting a new cycle, or the FDONE bit
    12409 	 * should be changed in the hardware so that it is 1 after a
    12410 	 * hardware reset, which could then be used to tell whether a cycle
    12411 	 * is in progress or has been completed. We should also have some
    12412 	 * software semaphore mechanism to guard FDONE or the
    12413 	 * cycle-in-progress bit so that accesses to those bits by two
    12414 	 * threads are serialized, or some other way to keep two threads
    12415 	 * from starting a cycle at the same time.
   12415 	 */
   12416 
   12417 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12418 		/*
   12419 		 * There is no cycle running at present, so we can start a
   12420 		 * cycle
   12421 		 */
   12422 
   12423 		/* Begin by setting Flash Cycle Done. */
   12424 		hsfsts |= HSFSTS_DONE;
   12425 		if (sc->sc_type >= WM_T_PCH_SPT)
   12426 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12427 			    hsfsts & 0xffffUL);
   12428 		else
   12429 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12430 		error = 0;
   12431 	} else {
   12432 		/*
    12433 		 * Otherwise, poll for some time so the current cycle has a
   12434 		 * chance to end before giving up.
   12435 		 */
   12436 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12437 			if (sc->sc_type >= WM_T_PCH_SPT)
   12438 				hsfsts = ICH8_FLASH_READ32(sc,
   12439 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12440 			else
   12441 				hsfsts = ICH8_FLASH_READ16(sc,
   12442 				    ICH_FLASH_HSFSTS);
   12443 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12444 				error = 0;
   12445 				break;
   12446 			}
   12447 			delay(1);
   12448 		}
   12449 		if (error == 0) {
   12450 			/*
    12451 			 * The previous cycle finished within the timeout;
    12452 			 * now set the Flash Cycle Done bit.
   12453 			 */
   12454 			hsfsts |= HSFSTS_DONE;
   12455 			if (sc->sc_type >= WM_T_PCH_SPT)
   12456 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12457 				    hsfsts & 0xffffUL);
   12458 			else
   12459 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12460 				    hsfsts);
   12461 		}
   12462 	}
   12463 	return error;
   12464 }
   12465 
   12466 /******************************************************************************
   12467  * This function starts a flash cycle and waits for its completion
   12468  *
   12469  * sc - The pointer to the hw structure
   12470  ****************************************************************************/
   12471 static int32_t
   12472 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12473 {
   12474 	uint16_t hsflctl;
   12475 	uint16_t hsfsts;
   12476 	int32_t error = 1;
   12477 	uint32_t i = 0;
   12478 
   12479 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12480 	if (sc->sc_type >= WM_T_PCH_SPT)
   12481 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12482 	else
   12483 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12484 	hsflctl |= HSFCTL_GO;
   12485 	if (sc->sc_type >= WM_T_PCH_SPT)
   12486 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12487 		    (uint32_t)hsflctl << 16);
   12488 	else
   12489 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12490 
   12491 	/* Wait till FDONE bit is set to 1 */
   12492 	do {
   12493 		if (sc->sc_type >= WM_T_PCH_SPT)
   12494 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12495 			    & 0xffffUL;
   12496 		else
   12497 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12498 		if (hsfsts & HSFSTS_DONE)
   12499 			break;
   12500 		delay(1);
   12501 		i++;
   12502 	} while (i < timeout);
    12503 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12504 		error = 0;
   12505 
   12506 	return error;
   12507 }
   12508 
   12509 /******************************************************************************
   12510  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12511  *
   12512  * sc - The pointer to the hw structure
   12513  * index - The index of the byte or word to read.
    12514  * size - Size of data to read, 1=byte, 2=word, 4=dword
   12515  * data - Pointer to the word to store the value read.
   12516  *****************************************************************************/
   12517 static int32_t
   12518 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12519     uint32_t size, uint32_t *data)
   12520 {
   12521 	uint16_t hsfsts;
   12522 	uint16_t hsflctl;
   12523 	uint32_t flash_linear_address;
   12524 	uint32_t flash_data = 0;
   12525 	int32_t error = 1;
   12526 	int32_t count = 0;
   12527 
    12528 	if (size < 1 || size > 4 || data == NULL ||
   12529 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12530 		return error;
   12531 
   12532 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12533 	    sc->sc_ich8_flash_base;
   12534 
   12535 	do {
   12536 		delay(1);
   12537 		/* Steps */
   12538 		error = wm_ich8_cycle_init(sc);
   12539 		if (error)
   12540 			break;
   12541 
   12542 		if (sc->sc_type >= WM_T_PCH_SPT)
   12543 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12544 			    >> 16;
   12545 		else
   12546 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12547 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
    12548 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12549 		    & HSFCTL_BCOUNT_MASK;
   12550 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12551 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12552 			/*
    12553 			 * In SPT, this register is in LAN memory space, not
    12554 			 * flash. Therefore, only 32-bit access is supported.
   12555 			 */
   12556 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12557 			    (uint32_t)hsflctl << 16);
   12558 		} else
   12559 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12560 
   12561 		/*
    12562 		 * Write the last 24 bits of the index into the Flash Linear
    12563 		 * Address field of the Flash Address register.
   12564 		 */
   12565 		/* TODO: TBD maybe check the index against the size of flash */
   12566 
   12567 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12568 
   12569 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12570 
   12571 		/*
    12572 		 * Check if FCERR is set to 1. If so, clear it and try the
    12573 		 * whole sequence a few more times; otherwise read in (shift
    12574 		 * in) the Flash Data0, least significant byte first.
   12576 		 */
   12577 		if (error == 0) {
   12578 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12579 			if (size == 1)
   12580 				*data = (uint8_t)(flash_data & 0x000000FF);
   12581 			else if (size == 2)
   12582 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12583 			else if (size == 4)
   12584 				*data = (uint32_t)flash_data;
   12585 			break;
   12586 		} else {
   12587 			/*
   12588 			 * If we've gotten here, then things are probably
   12589 			 * completely hosed, but if the error condition is
   12590 			 * detected, it won't hurt to give it another try...
   12591 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12592 			 */
   12593 			if (sc->sc_type >= WM_T_PCH_SPT)
   12594 				hsfsts = ICH8_FLASH_READ32(sc,
   12595 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12596 			else
   12597 				hsfsts = ICH8_FLASH_READ16(sc,
   12598 				    ICH_FLASH_HSFSTS);
   12599 
   12600 			if (hsfsts & HSFSTS_ERR) {
   12601 				/* Repeat for some time before giving up. */
   12602 				continue;
   12603 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12604 				break;
   12605 		}
   12606 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12607 
   12608 	return error;
   12609 }
   12610 
   12611 /******************************************************************************
   12612  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12613  *
    12614  * sc - pointer to the wm_softc structure
   12615  * index - The index of the byte to read.
   12616  * data - Pointer to a byte to store the value read.
   12617  *****************************************************************************/
   12618 static int32_t
   12619 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12620 {
   12621 	int32_t status;
   12622 	uint32_t word = 0;
   12623 
   12624 	status = wm_read_ich8_data(sc, index, 1, &word);
   12625 	if (status == 0)
   12626 		*data = (uint8_t)word;
   12627 	else
   12628 		*data = 0;
   12629 
   12630 	return status;
   12631 }
   12632 
   12633 /******************************************************************************
   12634  * Reads a word from the NVM using the ICH8 flash access registers.
   12635  *
    12636  * sc - pointer to the wm_softc structure
   12637  * index - The starting byte index of the word to read.
   12638  * data - Pointer to a word to store the value read.
   12639  *****************************************************************************/
   12640 static int32_t
   12641 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12642 {
   12643 	int32_t status;
   12644 	uint32_t word = 0;
   12645 
   12646 	status = wm_read_ich8_data(sc, index, 2, &word);
   12647 	if (status == 0)
   12648 		*data = (uint16_t)word;
   12649 	else
   12650 		*data = 0;
   12651 
   12652 	return status;
   12653 }
   12654 
   12655 /******************************************************************************
   12656  * Reads a dword from the NVM using the ICH8 flash access registers.
   12657  *
    12658  * sc - pointer to the wm_softc structure
    12659  * index - The starting byte index of the dword to read.
    12660  * data - Pointer to a dword to store the value read.
   12661  *****************************************************************************/
   12662 static int32_t
   12663 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12664 {
   12665 	int32_t status;
   12666 
   12667 	status = wm_read_ich8_data(sc, index, 4, data);
   12668 	return status;
   12669 }
   12670 
   12671 /******************************************************************************
   12672  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12673  * register.
   12674  *
   12675  * sc - Struct containing variables accessed by shared code
   12676  * offset - offset of word in the EEPROM to read
   12677  * data - word read from the EEPROM
   12678  * words - number of words to read
   12679  *****************************************************************************/
   12680 static int
   12681 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12682 {
   12683 	int32_t	 rv = 0;
   12684 	uint32_t flash_bank = 0;
   12685 	uint32_t act_offset = 0;
   12686 	uint32_t bank_offset = 0;
   12687 	uint16_t word = 0;
   12688 	uint16_t i = 0;
   12689 
   12690 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12691 		device_xname(sc->sc_dev), __func__));
   12692 
   12693 	if (sc->nvm.acquire(sc) != 0)
   12694 		return -1;
   12695 
   12696 	/*
   12697 	 * We need to know which is the valid flash bank.  In the event
   12698 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12699 	 * managing flash_bank. So it cannot be trusted and needs
   12700 	 * to be updated with each read.
   12701 	 */
   12702 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12703 	if (rv) {
   12704 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12705 			device_xname(sc->sc_dev)));
   12706 		flash_bank = 0;
   12707 	}
   12708 
   12709 	/*
   12710 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12711 	 * size
   12712 	 */
   12713 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12714 
   12715 	for (i = 0; i < words; i++) {
   12716 		/* The NVM part needs a byte offset, hence * 2 */
   12717 		act_offset = bank_offset + ((offset + i) * 2);
   12718 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12719 		if (rv) {
   12720 			aprint_error_dev(sc->sc_dev,
   12721 			    "%s: failed to read NVM\n", __func__);
   12722 			break;
   12723 		}
   12724 		data[i] = word;
   12725 	}
   12726 
   12727 	sc->nvm.release(sc);
   12728 	return rv;
   12729 }
   12730 
   12731 /******************************************************************************
   12732  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12733  * register.
   12734  *
   12735  * sc - Struct containing variables accessed by shared code
   12736  * offset - offset of word in the EEPROM to read
   12737  * data - word read from the EEPROM
   12738  * words - number of words to read
   12739  *****************************************************************************/
   12740 static int
   12741 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12742 {
   12743 	int32_t	 rv = 0;
   12744 	uint32_t flash_bank = 0;
   12745 	uint32_t act_offset = 0;
   12746 	uint32_t bank_offset = 0;
   12747 	uint32_t dword = 0;
   12748 	uint16_t i = 0;
   12749 
   12750 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12751 		device_xname(sc->sc_dev), __func__));
   12752 
   12753 	if (sc->nvm.acquire(sc) != 0)
   12754 		return -1;
   12755 
   12756 	/*
   12757 	 * We need to know which is the valid flash bank.  In the event
   12758 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12759 	 * managing flash_bank. So it cannot be trusted and needs
   12760 	 * to be updated with each read.
   12761 	 */
   12762 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12763 	if (rv) {
   12764 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12765 			device_xname(sc->sc_dev)));
   12766 		flash_bank = 0;
   12767 	}
   12768 
   12769 	/*
   12770 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12771 	 * size
   12772 	 */
   12773 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12774 
   12775 	for (i = 0; i < words; i++) {
   12776 		/* The NVM part needs a byte offset, hence * 2 */
   12777 		act_offset = bank_offset + ((offset + i) * 2);
   12778 		/* but we must read dword aligned, so mask ... */
   12779 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12780 		if (rv) {
   12781 			aprint_error_dev(sc->sc_dev,
   12782 			    "%s: failed to read NVM\n", __func__);
   12783 			break;
   12784 		}
   12785 		/* ... and pick out low or high word */
   12786 		if ((act_offset & 0x2) == 0)
   12787 			data[i] = (uint16_t)(dword & 0xFFFF);
   12788 		else
   12789 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12790 	}
   12791 
   12792 	sc->nvm.release(sc);
   12793 	return rv;
   12794 }
   12795 
   12796 /* iNVM */
   12797 
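          /*
           * wm_nvm_read_word_invm:
           *
           *	Read one word from the iNVM (integrated NVM) by walking the
           *	autoload records until one matches the given word address.
           */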
   12798 static int
   12799 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12800 {
   12801 	int32_t	 rv = 0;
   12802 	uint32_t invm_dword;
   12803 	uint16_t i;
   12804 	uint8_t record_type, word_address;
   12805 
   12806 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12807 		device_xname(sc->sc_dev), __func__));
   12808 
   12809 	for (i = 0; i < INVM_SIZE; i++) {
   12810 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12811 		/* Get record type */
   12812 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12813 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12814 			break;
   12815 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12816 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12817 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12818 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12819 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12820 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12821 			if (word_address == address) {
   12822 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12823 				rv = 0;
   12824 				break;
   12825 			}
   12826 		}
   12827 	}
   12828 
   12829 	return rv;
   12830 }
   12831 
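          /*
           * wm_nvm_read_invm:
           *
           *	Read words from the iNVM, substituting the chip's default
           *	value when a well-known word is not present (e.g. on I211).
           */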
   12832 static int
   12833 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12834 {
   12835 	int rv = 0;
   12836 	int i;
   12837 
   12838 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12839 		device_xname(sc->sc_dev), __func__));
   12840 
   12841 	if (sc->nvm.acquire(sc) != 0)
   12842 		return -1;
   12843 
   12844 	for (i = 0; i < words; i++) {
   12845 		switch (offset + i) {
   12846 		case NVM_OFF_MACADDR:
   12847 		case NVM_OFF_MACADDR1:
   12848 		case NVM_OFF_MACADDR2:
   12849 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12850 			if (rv != 0) {
   12851 				data[i] = 0xffff;
   12852 				rv = -1;
   12853 			}
   12854 			break;
   12855 		case NVM_OFF_CFG2:
   12856 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12857 			if (rv != 0) {
   12858 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12859 				rv = 0;
   12860 			}
   12861 			break;
   12862 		case NVM_OFF_CFG4:
   12863 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12864 			if (rv != 0) {
   12865 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12866 				rv = 0;
   12867 			}
   12868 			break;
   12869 		case NVM_OFF_LED_1_CFG:
   12870 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12871 			if (rv != 0) {
   12872 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12873 				rv = 0;
   12874 			}
   12875 			break;
   12876 		case NVM_OFF_LED_0_2_CFG:
   12877 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12878 			if (rv != 0) {
   12879 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12880 				rv = 0;
   12881 			}
   12882 			break;
   12883 		case NVM_OFF_ID_LED_SETTINGS:
   12884 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12885 			if (rv != 0) {
   12886 				*data = ID_LED_RESERVED_FFFF;
   12887 				rv = 0;
   12888 			}
   12889 			break;
   12890 		default:
   12891 			DPRINTF(WM_DEBUG_NVM,
   12892 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12893 			*data = NVM_RESERVED_WORD;
   12894 			break;
   12895 		}
   12896 	}
   12897 
   12898 	sc->nvm.release(sc);
   12899 	return rv;
   12900 }
   12901 
    12902 /* Locking, NVM type detection, checksum validation, version and read */
   12903 
   12904 static int
   12905 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12906 {
   12907 	uint32_t eecd = 0;
   12908 
   12909 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12910 	    || sc->sc_type == WM_T_82583) {
   12911 		eecd = CSR_READ(sc, WMREG_EECD);
   12912 
   12913 		/* Isolate bits 15 & 16 */
   12914 		eecd = ((eecd >> 15) & 0x03);
   12915 
   12916 		/* If both bits are set, device is Flash type */
   12917 		if (eecd == 0x03)
   12918 			return 0;
   12919 	}
   12920 	return 1;
   12921 }
   12922 
   12923 static int
   12924 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12925 {
   12926 	uint32_t eec;
   12927 
   12928 	eec = CSR_READ(sc, WMREG_EEC);
   12929 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12930 		return 1;
   12931 
   12932 	return 0;
   12933 }
   12934 
   12935 /*
   12936  * wm_nvm_validate_checksum
   12937  *
   12938  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12939  */
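/*
 * A sketch of the invariant: summing words 0..NVM_SIZE-1 modulo 2^16
 * must yield NVM_CHECKSUM.  Note that a mismatch is only reported under
 * WM_DEBUG below; it is not treated as fatal.
 */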
   12940 static int
   12941 wm_nvm_validate_checksum(struct wm_softc *sc)
   12942 {
   12943 	uint16_t checksum;
   12944 	uint16_t eeprom_data;
   12945 #ifdef WM_DEBUG
   12946 	uint16_t csum_wordaddr, valid_checksum;
   12947 #endif
   12948 	int i;
   12949 
   12950 	checksum = 0;
   12951 
   12952 	/* Don't check for I211 */
   12953 	if (sc->sc_type == WM_T_I211)
   12954 		return 0;
   12955 
   12956 #ifdef WM_DEBUG
   12957 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12958 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12959 		csum_wordaddr = NVM_OFF_COMPAT;
   12960 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12961 	} else {
   12962 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12963 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12964 	}
   12965 
   12966 	/* Dump EEPROM image for debug */
   12967 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12968 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12969 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12970 		/* XXX PCH_SPT? */
   12971 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12972 		if ((eeprom_data & valid_checksum) == 0) {
   12973 			DPRINTF(WM_DEBUG_NVM,
    12974 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12975 				device_xname(sc->sc_dev), eeprom_data,
   12976 				    valid_checksum));
   12977 		}
   12978 	}
   12979 
   12980 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12981 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12982 		for (i = 0; i < NVM_SIZE; i++) {
   12983 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12984 				printf("XXXX ");
   12985 			else
   12986 				printf("%04hx ", eeprom_data);
   12987 			if (i % 8 == 7)
   12988 				printf("\n");
   12989 		}
   12990 	}
   12991 
   12992 #endif /* WM_DEBUG */
   12993 
   12994 	for (i = 0; i < NVM_SIZE; i++) {
   12995 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12996 			return 1;
   12997 		checksum += eeprom_data;
   12998 	}
   12999 
   13000 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13001 #ifdef WM_DEBUG
   13002 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13003 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13004 #endif
   13005 	}
   13006 
   13007 	return 0;
   13008 }
   13009 
   13010 static void
   13011 wm_nvm_version_invm(struct wm_softc *sc)
   13012 {
   13013 	uint32_t dword;
   13014 
   13015 	/*
    13016 	 * Linux's code to decode the version is very strange, so we
    13017 	 * don't follow that algorithm and just use word 61 as the
    13018 	 * document describes.  It may not be perfect, though...
   13019 	 *
   13020 	 * Example:
   13021 	 *
   13022 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13023 	 */
   13024 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13025 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13026 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13027 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13028 }
   13029 
   13030 static void
   13031 wm_nvm_version(struct wm_softc *sc)
   13032 {
   13033 	uint16_t major, minor, build, patch;
   13034 	uint16_t uid0, uid1;
   13035 	uint16_t nvm_data;
   13036 	uint16_t off;
   13037 	bool check_version = false;
   13038 	bool check_optionrom = false;
   13039 	bool have_build = false;
   13040 	bool have_uid = true;
   13041 
   13042 	/*
   13043 	 * Version format:
   13044 	 *
   13045 	 * XYYZ
   13046 	 * X0YZ
   13047 	 * X0YY
   13048 	 *
   13049 	 * Example:
   13050 	 *
   13051 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13052 	 *	82571	0x50a6	5.10.6?
   13053 	 *	82572	0x506a	5.6.10?
   13054 	 *	82572EI	0x5069	5.6.9?
   13055 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13056 	 *		0x2013	2.1.3?
    13057 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13058 	 */
   13059 
   13060 	/*
   13061 	 * XXX
    13062 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13063 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13064 	 */
   13065 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13066 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13067 		have_uid = false;
   13068 
   13069 	switch (sc->sc_type) {
   13070 	case WM_T_82571:
   13071 	case WM_T_82572:
   13072 	case WM_T_82574:
   13073 	case WM_T_82583:
   13074 		check_version = true;
   13075 		check_optionrom = true;
   13076 		have_build = true;
   13077 		break;
   13078 	case WM_T_82575:
   13079 	case WM_T_82576:
   13080 	case WM_T_82580:
   13081 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13082 			check_version = true;
   13083 		break;
   13084 	case WM_T_I211:
   13085 		wm_nvm_version_invm(sc);
   13086 		have_uid = false;
   13087 		goto printver;
   13088 	case WM_T_I210:
   13089 		if (!wm_nvm_flash_presence_i210(sc)) {
   13090 			wm_nvm_version_invm(sc);
   13091 			have_uid = false;
   13092 			goto printver;
   13093 		}
   13094 		/* FALLTHROUGH */
   13095 	case WM_T_I350:
   13096 	case WM_T_I354:
   13097 		check_version = true;
   13098 		check_optionrom = true;
   13099 		break;
   13100 	default:
   13101 		return;
   13102 	}
   13103 	if (check_version
   13104 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13105 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13106 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13107 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13108 			build = nvm_data & NVM_BUILD_MASK;
   13109 			have_build = true;
   13110 		} else
   13111 			minor = nvm_data & 0x00ff;
   13112 
    13113 		/* Decimal: convert the BCD-coded minor (e.g. 0x10 -> 10) */
   13114 		minor = (minor / 16) * 10 + (minor % 16);
   13115 		sc->sc_nvm_ver_major = major;
   13116 		sc->sc_nvm_ver_minor = minor;
   13117 
   13118 printver:
   13119 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13120 		    sc->sc_nvm_ver_minor);
   13121 		if (have_build) {
   13122 			sc->sc_nvm_ver_build = build;
   13123 			aprint_verbose(".%d", build);
   13124 		}
   13125 	}
   13126 
    13127 	/* Assume the Option ROM area is above NVM_SIZE */
   13128 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13129 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13130 		/* Option ROM Version */
   13131 		if ((off != 0x0000) && (off != 0xffff)) {
   13132 			int rv;
   13133 
   13134 			off += NVM_COMBO_VER_OFF;
   13135 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13136 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13137 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13138 			    && (uid1 != 0) && (uid1 != 0xffff)) {
    13139 				/* Version packed across two 16-bit words */
   13140 				major = uid0 >> 8;
   13141 				build = (uid0 << 8) | (uid1 >> 8);
   13142 				patch = uid1 & 0x00ff;
   13143 				aprint_verbose(", option ROM Version %d.%d.%d",
   13144 				    major, build, patch);
   13145 			}
   13146 		}
   13147 	}
   13148 
   13149 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13150 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13151 }
   13152 
   13153 /*
   13154  * wm_nvm_read:
   13155  *
   13156  *	Read data from the serial EEPROM.
   13157  */
   13158 static int
   13159 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13160 {
   13161 	int rv;
   13162 
   13163 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13164 		device_xname(sc->sc_dev), __func__));
   13165 
   13166 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13167 		return -1;
   13168 
   13169 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13170 
   13171 	return rv;
   13172 }
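/*
 * Typical use (sketch; callers in this file follow the same pattern):
 *
 *	uint16_t word;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &word) == 0)
 *		... use word ...
 *
 * The backend nvm.read method acquires and releases the NVM lock itself
 * (see e.g. wm_nvm_read_invm() above).
 */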
   13173 
   13174 /*
   13175  * Hardware semaphores.
    13176  * Very complex...
   13177  */
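/*
 * The acquire/release pairs below come in several flavours:
 * wm_get/put_null (no locking), wm_get/put_eecd (EEPROM request/grant
 * bits), wm_get/put_swsm_semaphore (SWSM), wm_get/put_swfw_semaphore
 * (SW_FW_SYNC), wm_get/put_swfwhw_semaphore and wm_get/put_swflag_ich8lan
 * (EXTCNFCTR), plus chip-specific wrappers.  Every wm_get_*() must be
 * balanced by its matching wm_put_*().
 */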
   13178 
   13179 static int
   13180 wm_get_null(struct wm_softc *sc)
   13181 {
   13182 
   13183 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13184 		device_xname(sc->sc_dev), __func__));
   13185 	return 0;
   13186 }
   13187 
   13188 static void
   13189 wm_put_null(struct wm_softc *sc)
   13190 {
   13191 
   13192 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13193 		device_xname(sc->sc_dev), __func__));
   13194 	return;
   13195 }
   13196 
   13197 static int
   13198 wm_get_eecd(struct wm_softc *sc)
   13199 {
   13200 	uint32_t reg;
   13201 	int x;
   13202 
   13203 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13204 		device_xname(sc->sc_dev), __func__));
   13205 
   13206 	reg = CSR_READ(sc, WMREG_EECD);
   13207 
   13208 	/* Request EEPROM access. */
   13209 	reg |= EECD_EE_REQ;
   13210 	CSR_WRITE(sc, WMREG_EECD, reg);
   13211 
    13212 	/* ... and wait for it to be granted. */
   13213 	for (x = 0; x < 1000; x++) {
   13214 		reg = CSR_READ(sc, WMREG_EECD);
   13215 		if (reg & EECD_EE_GNT)
   13216 			break;
   13217 		delay(5);
   13218 	}
   13219 	if ((reg & EECD_EE_GNT) == 0) {
   13220 		aprint_error_dev(sc->sc_dev,
   13221 		    "could not acquire EEPROM GNT\n");
   13222 		reg &= ~EECD_EE_REQ;
   13223 		CSR_WRITE(sc, WMREG_EECD, reg);
   13224 		return -1;
   13225 	}
   13226 
   13227 	return 0;
   13228 }
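/* A successful wm_get_eecd() must be balanced by wm_put_eecd() below. */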
   13229 
   13230 static void
   13231 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13232 {
   13233 
   13234 	*eecd |= EECD_SK;
   13235 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13236 	CSR_WRITE_FLUSH(sc);
   13237 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13238 		delay(1);
   13239 	else
   13240 		delay(50);
   13241 }
   13242 
   13243 static void
   13244 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13245 {
   13246 
   13247 	*eecd &= ~EECD_SK;
   13248 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13249 	CSR_WRITE_FLUSH(sc);
   13250 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13251 		delay(1);
   13252 	else
   13253 		delay(50);
   13254 }
   13255 
   13256 static void
   13257 wm_put_eecd(struct wm_softc *sc)
   13258 {
   13259 	uint32_t reg;
   13260 
   13261 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13262 		device_xname(sc->sc_dev), __func__));
   13263 
   13264 	/* Stop nvm */
   13265 	reg = CSR_READ(sc, WMREG_EECD);
   13266 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13267 		/* Pull CS high */
   13268 		reg |= EECD_CS;
   13269 		wm_nvm_eec_clock_lower(sc, &reg);
   13270 	} else {
   13271 		/* CS on Microwire is active-high */
   13272 		reg &= ~(EECD_CS | EECD_DI);
   13273 		CSR_WRITE(sc, WMREG_EECD, reg);
   13274 		wm_nvm_eec_clock_raise(sc, &reg);
   13275 		wm_nvm_eec_clock_lower(sc, &reg);
   13276 	}
   13277 
   13278 	reg = CSR_READ(sc, WMREG_EECD);
   13279 	reg &= ~EECD_EE_REQ;
   13280 	CSR_WRITE(sc, WMREG_EECD, reg);
   13281 
   13282 	return;
   13283 }
   13284 
   13285 /*
   13286  * Get hardware semaphore.
   13287  * Same as e1000_get_hw_semaphore_generic()
   13288  */
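/*
 * The handshake, as a sketch: poll until the hardware clears SWSM_SMBI
 * (the SW semaphore), then set SWSM_SWESMBI and read it back; if the bit
 * sticks, software owns the SW/FW semaphore.  Both bits are cleared
 * again in wm_put_swsm_semaphore().
 */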
   13289 static int
   13290 wm_get_swsm_semaphore(struct wm_softc *sc)
   13291 {
   13292 	int32_t timeout;
   13293 	uint32_t swsm;
   13294 
   13295 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13296 		device_xname(sc->sc_dev), __func__));
   13297 	KASSERT(sc->sc_nvm_wordsize > 0);
   13298 
   13299 retry:
   13300 	/* Get the SW semaphore. */
   13301 	timeout = sc->sc_nvm_wordsize + 1;
   13302 	while (timeout) {
   13303 		swsm = CSR_READ(sc, WMREG_SWSM);
   13304 
   13305 		if ((swsm & SWSM_SMBI) == 0)
   13306 			break;
   13307 
   13308 		delay(50);
   13309 		timeout--;
   13310 	}
   13311 
   13312 	if (timeout == 0) {
   13313 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13314 			/*
   13315 			 * In rare circumstances, the SW semaphore may already
   13316 			 * be held unintentionally. Clear the semaphore once
   13317 			 * before giving up.
   13318 			 */
   13319 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13320 			wm_put_swsm_semaphore(sc);
   13321 			goto retry;
   13322 		}
   13323 		aprint_error_dev(sc->sc_dev,
   13324 		    "could not acquire SWSM SMBI\n");
   13325 		return 1;
   13326 	}
   13327 
   13328 	/* Get the FW semaphore. */
   13329 	timeout = sc->sc_nvm_wordsize + 1;
   13330 	while (timeout) {
   13331 		swsm = CSR_READ(sc, WMREG_SWSM);
   13332 		swsm |= SWSM_SWESMBI;
   13333 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13334 		/* If we managed to set the bit we got the semaphore. */
   13335 		swsm = CSR_READ(sc, WMREG_SWSM);
   13336 		if (swsm & SWSM_SWESMBI)
   13337 			break;
   13338 
   13339 		delay(50);
   13340 		timeout--;
   13341 	}
   13342 
   13343 	if (timeout == 0) {
   13344 		aprint_error_dev(sc->sc_dev,
   13345 		    "could not acquire SWSM SWESMBI\n");
   13346 		/* Release semaphores */
   13347 		wm_put_swsm_semaphore(sc);
   13348 		return 1;
   13349 	}
   13350 	return 0;
   13351 }
   13352 
   13353 /*
   13354  * Put hardware semaphore.
   13355  * Same as e1000_put_hw_semaphore_generic()
   13356  */
   13357 static void
   13358 wm_put_swsm_semaphore(struct wm_softc *sc)
   13359 {
   13360 	uint32_t swsm;
   13361 
   13362 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13363 		device_xname(sc->sc_dev), __func__));
   13364 
   13365 	swsm = CSR_READ(sc, WMREG_SWSM);
   13366 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13367 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13368 }
   13369 
   13370 /*
   13371  * Get SW/FW semaphore.
   13372  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13373  */
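/*
 * SW_FW_SYNC carries one software bit (mask << SWFW_SOFT_SHIFT) and one
 * firmware bit (mask << SWFW_FIRM_SHIFT) per resource.  A usage sketch:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		... access the EEPROM ...
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 */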
   13374 static int
   13375 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13376 {
   13377 	uint32_t swfw_sync;
   13378 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13379 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13380 	int timeout;
   13381 
   13382 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13383 		device_xname(sc->sc_dev), __func__));
   13384 
   13385 	if (sc->sc_type == WM_T_80003)
   13386 		timeout = 50;
   13387 	else
   13388 		timeout = 200;
   13389 
   13390 	while (timeout) {
   13391 		if (wm_get_swsm_semaphore(sc)) {
   13392 			aprint_error_dev(sc->sc_dev,
   13393 			    "%s: failed to get semaphore\n",
   13394 			    __func__);
   13395 			return 1;
   13396 		}
   13397 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13398 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13399 			swfw_sync |= swmask;
   13400 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13401 			wm_put_swsm_semaphore(sc);
   13402 			return 0;
   13403 		}
   13404 		wm_put_swsm_semaphore(sc);
   13405 		delay(5000);
   13406 		timeout--;
   13407 	}
   13408 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13409 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13410 	return 1;
   13411 }
   13412 
   13413 static void
   13414 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13415 {
   13416 	uint32_t swfw_sync;
   13417 
   13418 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13419 		device_xname(sc->sc_dev), __func__));
   13420 
   13421 	while (wm_get_swsm_semaphore(sc) != 0)
   13422 		continue;
   13423 
   13424 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13425 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13426 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13427 
   13428 	wm_put_swsm_semaphore(sc);
   13429 }
   13430 
   13431 static int
   13432 wm_get_nvm_80003(struct wm_softc *sc)
   13433 {
   13434 	int rv;
   13435 
   13436 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13437 		device_xname(sc->sc_dev), __func__));
   13438 
   13439 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13440 		aprint_error_dev(sc->sc_dev,
   13441 		    "%s: failed to get semaphore(SWFW)\n",
   13442 		    __func__);
   13443 		return rv;
   13444 	}
   13445 
   13446 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13447 	    && (rv = wm_get_eecd(sc)) != 0) {
   13448 		aprint_error_dev(sc->sc_dev,
   13449 		    "%s: failed to get semaphore(EECD)\n",
   13450 		    __func__);
   13451 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13452 		return rv;
   13453 	}
   13454 
   13455 	return 0;
   13456 }
   13457 
   13458 static void
   13459 wm_put_nvm_80003(struct wm_softc *sc)
   13460 {
   13461 
   13462 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13463 		device_xname(sc->sc_dev), __func__));
   13464 
   13465 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13466 		wm_put_eecd(sc);
   13467 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13468 }
   13469 
   13470 static int
   13471 wm_get_nvm_82571(struct wm_softc *sc)
   13472 {
   13473 	int rv;
   13474 
   13475 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13476 		device_xname(sc->sc_dev), __func__));
   13477 
   13478 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13479 		return rv;
   13480 
   13481 	switch (sc->sc_type) {
   13482 	case WM_T_82573:
   13483 		break;
   13484 	default:
   13485 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13486 			rv = wm_get_eecd(sc);
   13487 		break;
   13488 	}
   13489 
   13490 	if (rv != 0) {
   13491 		aprint_error_dev(sc->sc_dev,
   13492 		    "%s: failed to get semaphore\n",
   13493 		    __func__);
   13494 		wm_put_swsm_semaphore(sc);
   13495 	}
   13496 
   13497 	return rv;
   13498 }
   13499 
   13500 static void
   13501 wm_put_nvm_82571(struct wm_softc *sc)
   13502 {
   13503 
   13504 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13505 		device_xname(sc->sc_dev), __func__));
   13506 
   13507 	switch (sc->sc_type) {
   13508 	case WM_T_82573:
   13509 		break;
   13510 	default:
   13511 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13512 			wm_put_eecd(sc);
   13513 		break;
   13514 	}
   13515 
   13516 	wm_put_swsm_semaphore(sc);
   13517 }
   13518 
   13519 static int
   13520 wm_get_phy_82575(struct wm_softc *sc)
   13521 {
   13522 
   13523 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13524 		device_xname(sc->sc_dev), __func__));
   13525 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13526 }
   13527 
   13528 static void
   13529 wm_put_phy_82575(struct wm_softc *sc)
   13530 {
   13531 
   13532 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13533 		device_xname(sc->sc_dev), __func__));
   13534 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13535 }
   13536 
   13537 static int
   13538 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13539 {
   13540 	uint32_t ext_ctrl;
   13541 	int timeout = 200;
   13542 
   13543 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13544 		device_xname(sc->sc_dev), __func__));
   13545 
   13546 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13547 	for (timeout = 0; timeout < 200; timeout++) {
   13548 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13549 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13550 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13551 
   13552 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13553 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13554 			return 0;
   13555 		delay(5000);
   13556 	}
   13557 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13558 	    device_xname(sc->sc_dev), ext_ctrl);
   13559 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13560 	return 1;
   13561 }
   13562 
   13563 static void
   13564 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13565 {
   13566 	uint32_t ext_ctrl;
   13567 
   13568 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13569 		device_xname(sc->sc_dev), __func__));
   13570 
   13571 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13572 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13573 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13574 
   13575 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13576 }
   13577 
   13578 static int
   13579 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13580 {
   13581 	uint32_t ext_ctrl;
   13582 	int timeout;
   13583 
   13584 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13585 		device_xname(sc->sc_dev), __func__));
   13586 	mutex_enter(sc->sc_ich_phymtx);
   13587 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13588 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13589 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13590 			break;
   13591 		delay(1000);
   13592 	}
   13593 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13594 		printf("%s: SW has already locked the resource\n",
   13595 		    device_xname(sc->sc_dev));
   13596 		goto out;
   13597 	}
   13598 
   13599 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13600 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13601 	for (timeout = 0; timeout < 1000; timeout++) {
   13602 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13603 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13604 			break;
   13605 		delay(1000);
   13606 	}
   13607 	if (timeout >= 1000) {
   13608 		printf("%s: failed to acquire semaphore\n",
   13609 		    device_xname(sc->sc_dev));
   13610 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13611 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13612 		goto out;
   13613 	}
   13614 	return 0;
   13615 
   13616 out:
   13617 	mutex_exit(sc->sc_ich_phymtx);
   13618 	return 1;
   13619 }
   13620 
   13621 static void
   13622 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13623 {
   13624 	uint32_t ext_ctrl;
   13625 
   13626 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13627 		device_xname(sc->sc_dev), __func__));
   13628 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13629 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13630 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13631 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13632 	} else {
   13633 		printf("%s: Semaphore unexpectedly released\n",
   13634 		    device_xname(sc->sc_dev));
   13635 	}
   13636 
   13637 	mutex_exit(sc->sc_ich_phymtx);
   13638 }
   13639 
   13640 static int
   13641 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13642 {
   13643 
   13644 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13645 		device_xname(sc->sc_dev), __func__));
   13646 	mutex_enter(sc->sc_ich_nvmmtx);
   13647 
   13648 	return 0;
   13649 }
   13650 
   13651 static void
   13652 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13653 {
   13654 
   13655 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13656 		device_xname(sc->sc_dev), __func__));
   13657 	mutex_exit(sc->sc_ich_nvmmtx);
   13658 }
   13659 
   13660 static int
   13661 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13662 {
   13663 	int i = 0;
   13664 	uint32_t reg;
   13665 
   13666 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13667 		device_xname(sc->sc_dev), __func__));
   13668 
   13669 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13670 	do {
   13671 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13672 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13673 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13674 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13675 			break;
   13676 		delay(2*1000);
   13677 		i++;
   13678 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13679 
   13680 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13681 		wm_put_hw_semaphore_82573(sc);
   13682 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13683 		    device_xname(sc->sc_dev));
   13684 		return -1;
   13685 	}
   13686 
   13687 	return 0;
   13688 }
   13689 
   13690 static void
   13691 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13692 {
   13693 	uint32_t reg;
   13694 
   13695 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13696 		device_xname(sc->sc_dev), __func__));
   13697 
   13698 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13699 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13700 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13701 }
   13702 
   13703 /*
   13704  * Management mode and power management related subroutines.
   13705  * BMC, AMT, suspend/resume and EEE.
   13706  */
   13707 
   13708 #ifdef WM_WOL
   13709 static int
   13710 wm_check_mng_mode(struct wm_softc *sc)
   13711 {
   13712 	int rv;
   13713 
   13714 	switch (sc->sc_type) {
   13715 	case WM_T_ICH8:
   13716 	case WM_T_ICH9:
   13717 	case WM_T_ICH10:
   13718 	case WM_T_PCH:
   13719 	case WM_T_PCH2:
   13720 	case WM_T_PCH_LPT:
   13721 	case WM_T_PCH_SPT:
   13722 	case WM_T_PCH_CNP:
   13723 		rv = wm_check_mng_mode_ich8lan(sc);
   13724 		break;
   13725 	case WM_T_82574:
   13726 	case WM_T_82583:
   13727 		rv = wm_check_mng_mode_82574(sc);
   13728 		break;
   13729 	case WM_T_82571:
   13730 	case WM_T_82572:
   13731 	case WM_T_82573:
   13732 	case WM_T_80003:
   13733 		rv = wm_check_mng_mode_generic(sc);
   13734 		break;
   13735 	default:
    13736 		/* Nothing to do */
   13737 		rv = 0;
   13738 		break;
   13739 	}
   13740 
   13741 	return rv;
   13742 }
   13743 
   13744 static int
   13745 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13746 {
   13747 	uint32_t fwsm;
   13748 
   13749 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13750 
   13751 	if (((fwsm & FWSM_FW_VALID) != 0)
   13752 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13753 		return 1;
   13754 
   13755 	return 0;
   13756 }
   13757 
   13758 static int
   13759 wm_check_mng_mode_82574(struct wm_softc *sc)
   13760 {
   13761 	uint16_t data;
   13762 
   13763 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13764 
   13765 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13766 		return 1;
   13767 
   13768 	return 0;
   13769 }
   13770 
   13771 static int
   13772 wm_check_mng_mode_generic(struct wm_softc *sc)
   13773 {
   13774 	uint32_t fwsm;
   13775 
   13776 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13777 
   13778 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13779 		return 1;
   13780 
   13781 	return 0;
   13782 }
   13783 #endif /* WM_WOL */
   13784 
   13785 static int
   13786 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13787 {
   13788 	uint32_t manc, fwsm, factps;
   13789 
   13790 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13791 		return 0;
   13792 
   13793 	manc = CSR_READ(sc, WMREG_MANC);
   13794 
   13795 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13796 		device_xname(sc->sc_dev), manc));
   13797 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13798 		return 0;
   13799 
   13800 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13801 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13802 		factps = CSR_READ(sc, WMREG_FACTPS);
   13803 		if (((factps & FACTPS_MNGCG) == 0)
   13804 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13805 			return 1;
   13806 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13807 		uint16_t data;
   13808 
   13809 		factps = CSR_READ(sc, WMREG_FACTPS);
   13810 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13811 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13812 			device_xname(sc->sc_dev), factps, data));
   13813 		if (((factps & FACTPS_MNGCG) == 0)
   13814 		    && ((data & NVM_CFG2_MNGM_MASK)
   13815 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13816 			return 1;
   13817 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13818 	    && ((manc & MANC_ASF_EN) == 0))
   13819 		return 1;
   13820 
   13821 	return 0;
   13822 }
   13823 
   13824 static bool
   13825 wm_phy_resetisblocked(struct wm_softc *sc)
   13826 {
   13827 	bool blocked = false;
   13828 	uint32_t reg;
   13829 	int i = 0;
   13830 
   13831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13832 		device_xname(sc->sc_dev), __func__));
   13833 
   13834 	switch (sc->sc_type) {
   13835 	case WM_T_ICH8:
   13836 	case WM_T_ICH9:
   13837 	case WM_T_ICH10:
   13838 	case WM_T_PCH:
   13839 	case WM_T_PCH2:
   13840 	case WM_T_PCH_LPT:
   13841 	case WM_T_PCH_SPT:
   13842 	case WM_T_PCH_CNP:
   13843 		do {
   13844 			reg = CSR_READ(sc, WMREG_FWSM);
   13845 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13846 				blocked = true;
   13847 				delay(10*1000);
   13848 				continue;
   13849 			}
   13850 			blocked = false;
   13851 		} while (blocked && (i++ < 30));
   13852 		return blocked;
   13853 		break;
   13854 	case WM_T_82571:
   13855 	case WM_T_82572:
   13856 	case WM_T_82573:
   13857 	case WM_T_82574:
   13858 	case WM_T_82583:
   13859 	case WM_T_80003:
   13860 		reg = CSR_READ(sc, WMREG_MANC);
   13861 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13862 			return true;
   13863 		else
   13864 			return false;
   13865 		break;
   13866 	default:
   13867 		/* no problem */
   13868 		break;
   13869 	}
   13870 
   13871 	return false;
   13872 }
   13873 
   13874 static void
   13875 wm_get_hw_control(struct wm_softc *sc)
   13876 {
   13877 	uint32_t reg;
   13878 
   13879 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13880 		device_xname(sc->sc_dev), __func__));
   13881 
   13882 	if (sc->sc_type == WM_T_82573) {
   13883 		reg = CSR_READ(sc, WMREG_SWSM);
   13884 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13885 	} else if (sc->sc_type >= WM_T_82571) {
   13886 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13887 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13888 	}
   13889 }
   13890 
   13891 static void
   13892 wm_release_hw_control(struct wm_softc *sc)
   13893 {
   13894 	uint32_t reg;
   13895 
   13896 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13897 		device_xname(sc->sc_dev), __func__));
   13898 
   13899 	if (sc->sc_type == WM_T_82573) {
   13900 		reg = CSR_READ(sc, WMREG_SWSM);
   13901 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13902 	} else if (sc->sc_type >= WM_T_82571) {
   13903 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13904 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13905 	}
   13906 }
   13907 
   13908 static void
   13909 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13910 {
   13911 	uint32_t reg;
   13912 
   13913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13914 		device_xname(sc->sc_dev), __func__));
   13915 
   13916 	if (sc->sc_type < WM_T_PCH2)
   13917 		return;
   13918 
   13919 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13920 
   13921 	if (gate)
   13922 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13923 	else
   13924 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13925 
   13926 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13927 }
   13928 
   13929 static int
   13930 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   13931 {
   13932 	uint32_t fwsm, reg;
   13933 	int rv = 0;
   13934 
   13935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13936 		device_xname(sc->sc_dev), __func__));
   13937 
   13938 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13939 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13940 
   13941 	/* Disable ULP */
   13942 	wm_ulp_disable(sc);
   13943 
   13944 	/* Acquire PHY semaphore */
   13945 	rv = sc->phy.acquire(sc);
   13946 	if (rv != 0) {
   13947 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   13948 		device_xname(sc->sc_dev), __func__));
   13949 		return -1;
   13950 	}
   13951 
   13952 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   13953 	 * inaccessible and resetting the PHY is not blocked, toggle the
   13954 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   13955 	 */
   13956 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13957 	switch (sc->sc_type) {
   13958 	case WM_T_PCH_LPT:
   13959 	case WM_T_PCH_SPT:
   13960 	case WM_T_PCH_CNP:
   13961 		if (wm_phy_is_accessible_pchlan(sc))
   13962 			break;
   13963 
   13964 		/* Before toggling LANPHYPC, see if PHY is accessible by
   13965 		 * forcing MAC to SMBus mode first.
   13966 		 */
   13967 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13968 		reg |= CTRL_EXT_FORCE_SMBUS;
   13969 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13970 #if 0
   13971 		/* XXX Isn't this required??? */
   13972 		CSR_WRITE_FLUSH(sc);
   13973 #endif
   13974 		/* Wait 50 milliseconds for MAC to finish any retries
   13975 		 * that it might be trying to perform from previous
   13976 		 * attempts to acknowledge any phy read requests.
   13977 		 */
   13978 		delay(50 * 1000);
   13979 		/* FALLTHROUGH */
   13980 	case WM_T_PCH2:
   13981 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13982 			break;
   13983 		/* FALLTHROUGH */
   13984 	case WM_T_PCH:
   13985 		if (sc->sc_type == WM_T_PCH)
   13986 			if ((fwsm & FWSM_FW_VALID) != 0)
   13987 				break;
   13988 
   13989 		if (wm_phy_resetisblocked(sc) == true) {
   13990 			printf("XXX reset is blocked(3)\n");
   13991 			break;
   13992 		}
   13993 
   13994 		/* Toggle LANPHYPC Value bit */
   13995 		wm_toggle_lanphypc_pch_lpt(sc);
   13996 
   13997 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13998 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13999 				break;
   14000 
   14001 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14002 			 * so ensure that the MAC is also out of SMBus mode
   14003 			 */
   14004 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14005 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14006 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14007 
   14008 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14009 				break;
   14010 			rv = -1;
   14011 		}
   14012 		break;
   14013 	default:
   14014 		break;
   14015 	}
   14016 
   14017 	/* Release semaphore */
   14018 	sc->phy.release(sc);
   14019 
   14020 	if (rv == 0) {
   14021 		/* Check to see if able to reset PHY.  Print error if not */
   14022 		if (wm_phy_resetisblocked(sc)) {
   14023 			printf("XXX reset is blocked(4)\n");
   14024 			goto out;
   14025 		}
   14026 
    14027 		/* Reset the PHY before any access to it.  Doing so ensures
   14028 		 * that the PHY is in a known good state before we read/write
   14029 		 * PHY registers.  The generic reset is sufficient here,
   14030 		 * because we haven't determined the PHY type yet.
   14031 		 */
   14032 		if (wm_reset_phy(sc) != 0)
   14033 			goto out;
   14034 
   14035 		/* On a successful reset, possibly need to wait for the PHY
   14036 		 * to quiesce to an accessible state before returning control
   14037 		 * to the calling function.  If the PHY does not quiesce, then
   14038 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    14039 		 * the PHY is in.
   14040 		 */
   14041 		if (wm_phy_resetisblocked(sc))
   14042 			printf("XXX reset is blocked(4)\n");
   14043 	}
   14044 
   14045 out:
   14046 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14047 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14048 		delay(10*1000);
   14049 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14050 	}
   14051 
    14052 	return rv;
   14053 }
   14054 
   14055 static void
   14056 wm_init_manageability(struct wm_softc *sc)
   14057 {
   14058 
   14059 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14060 		device_xname(sc->sc_dev), __func__));
   14061 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14062 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14063 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14064 
   14065 		/* Disable hardware interception of ARP */
   14066 		manc &= ~MANC_ARP_EN;
   14067 
   14068 		/* Enable receiving management packets to the host */
   14069 		if (sc->sc_type >= WM_T_82571) {
   14070 			manc |= MANC_EN_MNG2HOST;
   14071 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14072 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14073 		}
   14074 
   14075 		CSR_WRITE(sc, WMREG_MANC, manc);
   14076 	}
   14077 }
   14078 
   14079 static void
   14080 wm_release_manageability(struct wm_softc *sc)
   14081 {
   14082 
   14083 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14084 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14085 
   14086 		manc |= MANC_ARP_EN;
   14087 		if (sc->sc_type >= WM_T_82571)
   14088 			manc &= ~MANC_EN_MNG2HOST;
   14089 
   14090 		CSR_WRITE(sc, WMREG_MANC, manc);
   14091 	}
   14092 }
   14093 
   14094 static void
   14095 wm_get_wakeup(struct wm_softc *sc)
   14096 {
   14097 
   14098 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14099 	switch (sc->sc_type) {
   14100 	case WM_T_82573:
   14101 	case WM_T_82583:
   14102 		sc->sc_flags |= WM_F_HAS_AMT;
   14103 		/* FALLTHROUGH */
   14104 	case WM_T_80003:
   14105 	case WM_T_82575:
   14106 	case WM_T_82576:
   14107 	case WM_T_82580:
   14108 	case WM_T_I350:
   14109 	case WM_T_I354:
   14110 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14111 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14112 		/* FALLTHROUGH */
   14113 	case WM_T_82541:
   14114 	case WM_T_82541_2:
   14115 	case WM_T_82547:
   14116 	case WM_T_82547_2:
   14117 	case WM_T_82571:
   14118 	case WM_T_82572:
   14119 	case WM_T_82574:
   14120 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14121 		break;
   14122 	case WM_T_ICH8:
   14123 	case WM_T_ICH9:
   14124 	case WM_T_ICH10:
   14125 	case WM_T_PCH:
   14126 	case WM_T_PCH2:
   14127 	case WM_T_PCH_LPT:
   14128 	case WM_T_PCH_SPT:
   14129 	case WM_T_PCH_CNP:
   14130 		sc->sc_flags |= WM_F_HAS_AMT;
   14131 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14132 		break;
   14133 	default:
   14134 		break;
   14135 	}
   14136 
   14137 	/* 1: HAS_MANAGE */
   14138 	if (wm_enable_mng_pass_thru(sc) != 0)
   14139 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14140 
   14141 	/*
    14142 	 * Note that the WOL flags are set after the EEPROM
    14143 	 * stuff is reset.
   14144 	 */
   14145 }
   14146 
   14147 /*
   14148  * Unconfigure Ultra Low Power mode.
   14149  * Only for I217 and newer (see below).
   14150  */
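/*
 * Two paths, as a sketch: if valid ME firmware is present (FWSM_FW_VALID),
 * ask the ME to un-configure ULP via H2ME and poll FWSM_ULP_CFG_DONE;
 * otherwise do it by hand -- toggle LANPHYPC, unforce SMBus mode in both
 * PHY and MAC, and clear/restart I218_ULP_CONFIG1.
 */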
   14151 static int
   14152 wm_ulp_disable(struct wm_softc *sc)
   14153 {
   14154 	uint32_t reg;
   14155 	uint16_t phyreg;
   14156 	int i = 0, rv = 0;
   14157 
   14158 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14159 		device_xname(sc->sc_dev), __func__));
   14160 	/* Exclude old devices */
   14161 	if ((sc->sc_type < WM_T_PCH_LPT)
   14162 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14163 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14164 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14165 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14166 		return 0;
   14167 
   14168 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14169 		/* Request ME un-configure ULP mode in the PHY */
   14170 		reg = CSR_READ(sc, WMREG_H2ME);
   14171 		reg &= ~H2ME_ULP;
   14172 		reg |= H2ME_ENFORCE_SETTINGS;
   14173 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14174 
   14175 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14176 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14177 			if (i++ == 30) {
   14178 				printf("%s timed out\n", __func__);
   14179 				return -1;
   14180 			}
   14181 			delay(10 * 1000);
   14182 		}
   14183 		reg = CSR_READ(sc, WMREG_H2ME);
   14184 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14185 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14186 
   14187 		return 0;
   14188 	}
   14189 
   14190 	/* Acquire semaphore */
   14191 	rv = sc->phy.acquire(sc);
   14192 	if (rv != 0) {
   14193 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14194 		device_xname(sc->sc_dev), __func__));
   14195 		goto release;
   14196 	}
   14197 
   14198 	/* Toggle LANPHYPC */
   14199 	wm_toggle_lanphypc_pch_lpt(sc);
   14200 
   14201 	/* Unforce SMBus mode in PHY */
   14202 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14203 	if (rv != 0) {
   14204 		uint32_t reg2;
   14205 
   14206 		printf("%s: Force SMBus first.\n", __func__);
   14207 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14208 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14209 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14210 		delay(50 * 1000);
   14211 
   14212 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14213 		    &phyreg);
   14214 		if (rv != 0)
   14215 			goto release;
   14216 	}
   14217 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14218 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14219 
   14220 	/* Unforce SMBus mode in MAC */
   14221 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14222 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14223 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14224 
   14225 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14226 	if (rv != 0)
   14227 		goto release;
   14228 	phyreg |= HV_PM_CTRL_K1_ENA;
   14229 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14230 
   14231 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14232 		&phyreg);
   14233 	if (rv != 0)
   14234 		goto release;
   14235 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14236 	    | I218_ULP_CONFIG1_STICKY_ULP
   14237 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14238 	    | I218_ULP_CONFIG1_WOL_HOST
   14239 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14240 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14241 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14242 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14243 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14244 	phyreg |= I218_ULP_CONFIG1_START;
   14245 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14246 
   14247 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14248 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14249 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14250 
   14251 release:
   14252 	/* Release semaphore */
   14253 	sc->phy.release(sc);
   14254 	wm_gmii_reset(sc);
   14255 	delay(50 * 1000);
   14256 
   14257 	return rv;
   14258 }
   14259 
   14260 /* WOL in the newer chipset interfaces (pchlan) */
   14261 static void
   14262 wm_enable_phy_wakeup(struct wm_softc *sc)
   14263 {
   14264 #if 0
   14265 	uint16_t preg;
   14266 
   14267 	/* Copy MAC RARs to PHY RARs */
   14268 
   14269 	/* Copy MAC MTA to PHY MTA */
   14270 
   14271 	/* Configure PHY Rx Control register */
   14272 
   14273 	/* Enable PHY wakeup in MAC register */
   14274 
   14275 	/* Configure and enable PHY wakeup in PHY registers */
   14276 
   14277 	/* Activate PHY wakeup */
   14278 
   14279 	/* XXX */
   14280 #endif
   14281 }
   14282 
   14283 /* Power down workaround on D3 */
   14284 static void
   14285 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14286 {
   14287 	uint32_t reg;
   14288 	int i;
   14289 
   14290 	for (i = 0; i < 2; i++) {
   14291 		/* Disable link */
   14292 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14293 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14294 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14295 
   14296 		/*
   14297 		 * Call gig speed drop workaround on Gig disable before
   14298 		 * accessing any PHY registers
   14299 		 */
   14300 		if (sc->sc_type == WM_T_ICH8)
   14301 			wm_gig_downshift_workaround_ich8lan(sc);
   14302 
   14303 		/* Write VR power-down enable */
   14304 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14305 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14306 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14307 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14308 
   14309 		/* Read it back and test */
   14310 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14311 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14312 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14313 			break;
   14314 
   14315 		/* Issue PHY reset and repeat at most one more time */
   14316 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14317 	}
   14318 }
   14319 
   14320 /*
   14321  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14322  *  @sc: pointer to the HW structure
   14323  *
   14324  *  During S0 to Sx transition, it is possible the link remains at gig
   14325  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14326  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14327  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14328  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14329  *  needs to be written.
   14330  *  Parts that support (and are linked to a partner which support) EEE in
   14331  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14332  *  than 10Mbps w/o EEE.
   14333  */
   14334 static void
   14335 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14336 {
   14337 	uint32_t phy_ctrl;
   14338 
   14339 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14340 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14341 
   14342 	if (sc->sc_phytype == WMPHY_I217) {
   14343 		uint16_t devid = sc->sc_pcidevid;
   14344 
   14345 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14346 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14347 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14348 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14349 		    (sc->sc_type >= WM_T_PCH_SPT))
   14350 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14351 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14352 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14353 
   14354 #if 0 /* notyet */
   14355 		if (sc->phy.acquire(sc) != 0)
   14356 			goto out;
   14357 
   14358 		/* XXX Do workaround for EEE */
   14359 
   14360 		/*
   14361 		 * For i217 Intel Rapid Start Technology support,
   14362 		 * when the system is going into Sx and no manageability engine
   14363 		 * is present, the driver must configure proxy to reset only on
   14364 		 * power good.	LPI (Low Power Idle) state must also reset only
   14365 		 * on power good, as well as the MTA (Multicast table array).
   14366 		 * The SMBus release must also be disabled on LCD reset.
   14367 		 */
   14368 
   14369 		/*
   14370 		 * Enable MTA to reset for Intel Rapid Start Technology
   14371 		 * Support
   14372 		 */
   14373 
   14374 		sc->phy.release(sc);
   14375 #endif
   14376 	}
   14377 #if 0
   14378 out:
   14379 #endif
   14380 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14381 
   14382 	if (sc->sc_type == WM_T_ICH8)
   14383 		wm_gig_downshift_workaround_ich8lan(sc);
   14384 
   14385 	if (sc->sc_type >= WM_T_PCH) {
   14386 		wm_oem_bits_config_ich8lan(sc, false);
   14387 
   14388 		/* Reset PHY to activate OEM bits on 82577/8 */
   14389 		if (sc->sc_type == WM_T_PCH)
   14390 			wm_reset_phy(sc);
   14391 
   14392 		if (sc->phy.acquire(sc) != 0)
   14393 			return;
   14394 		wm_write_smbus_addr(sc);
   14395 		sc->phy.release(sc);
   14396 	}
   14397 }
   14398 
   14399 /*
   14400  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14401  *  @hw: pointer to the HW structure
   14402  *
   14403  *  During Sx to S0 transitions on non-managed devices or managed devices
   14404  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14405  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14406  *  the PHY.
   14407  *  On i217, setup Intel Rapid Start Technology.
   14408  */
   14409 static int
   14410 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14411 {
   14412 	device_t dev = sc->sc_dev;
   14413 	int rv;
   14414 
   14415 	if (sc->sc_type < WM_T_PCH2)
   14416 		return 0;
   14417 
   14418 	rv = wm_init_phy_workarounds_pchlan(sc);
   14419 	if (rv != 0)
   14420 		return -1;
   14421 
    14422 	/* For i217 Intel Rapid Start Technology support, when the system
    14423 	 * is transitioning from Sx and no manageability engine is present,
    14424 	 * configure SMBus to restore on reset, disable proxy, and enable
   14425 	 * the reset on MTA (Multicast table array).
   14426 	 */
   14427 	if (sc->sc_phytype == WMPHY_I217) {
   14428 		uint16_t phy_reg;
   14429 
   14430 		if (sc->phy.acquire(sc) != 0)
   14431 			goto release;
   14432 
   14433 		/* Clear Auto Enable LPI after link up */
   14434 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14435 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14436 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14437 
   14438 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14439 			/* Restore clear on SMB if no manageability engine
   14440 			 * is present
   14441 			 */
    14442 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR, &phy_reg);
   14443 			if (rv != 0)
   14444 				goto release;
   14445 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14446 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14447 
   14448 			/* Disable Proxy */
   14449 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14450 		}
   14451 		/* Enable reset on MTA */
    14452 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14453 		if (rv != 0)
   14454 			goto release;
   14455 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14456 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14457 
   14458 release:
   14459 		sc->phy.release(sc);
   14460 		return rv;
   14461 	}
   14462 
   14463 	return 0;
   14464 }
   14465 
   14466 static void
   14467 wm_enable_wakeup(struct wm_softc *sc)
   14468 {
   14469 	uint32_t reg, pmreg;
   14470 	pcireg_t pmode;
   14471 
   14472 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14473 		device_xname(sc->sc_dev), __func__));
   14474 
   14475 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14476 		&pmreg, NULL) == 0)
   14477 		return;
   14478 
   14479 	/* Advertise the wakeup capability */
   14480 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14481 	    | CTRL_SWDPIN(3));
   14482 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14483 
   14484 	/* Keep the laser running on fiber adapters */
   14485 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14486 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14487 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14488 		reg |= CTRL_EXT_SWDPIN(3);
   14489 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14490 	}
   14491 
   14492 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14493 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
   14494 		wm_suspend_workarounds_ich8lan(sc);
   14495 
   14496 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14497 #if 0	/* for the multicast packet */
   14498 	reg |= WUFC_MC;
   14499 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14500 #endif
   14501 
   14502 	if (sc->sc_type >= WM_T_PCH)
   14503 		wm_enable_phy_wakeup(sc);
   14504 	else {
   14505 		/* Enable wakeup by the MAC */
   14506 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14507 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14508 	}
   14509 
   14510 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14511 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14512 		|| (sc->sc_type == WM_T_PCH2))
   14513 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14514 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14515 
   14516 	/* Request PME */
   14517 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14518 #if 0
   14519 	/* Disable WOL */
   14520 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14521 #else
   14522 	/* For WOL */
   14523 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14524 #endif
   14525 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14526 }
   14527 
   14528 /* Disable ASPM L0s and/or L1 for workaround */
   14529 static void
   14530 wm_disable_aspm(struct wm_softc *sc)
   14531 {
   14532 	pcireg_t reg, mask = 0;
    14533 	const char *str = "";
   14534 
   14535 	/*
    14536 	 * Only for PCIe devices which have the PCIe capability in the
    14537 	 * PCI config space.
   14538 	 */
   14539 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14540 		return;
   14541 
   14542 	switch (sc->sc_type) {
   14543 	case WM_T_82571:
   14544 	case WM_T_82572:
   14545 		/*
   14546 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14547 		 * State Power management L1 State (ASPM L1).
   14548 		 */
   14549 		mask = PCIE_LCSR_ASPM_L1;
   14550 		str = "L1 is";
   14551 		break;
   14552 	case WM_T_82573:
   14553 	case WM_T_82574:
   14554 	case WM_T_82583:
   14555 		/*
   14556 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14557 		 *
    14558 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14559 		 * some chipsets.  The documents for the 82574 and 82583 say
    14560 		 * that disabling L0s with those specific chipsets is
    14561 		 * sufficient, but we follow what the Intel em driver does.
   14562 		 *
   14563 		 * References:
   14564 		 * Errata 8 of the Specification Update of i82573.
   14565 		 * Errata 20 of the Specification Update of i82574.
   14566 		 * Errata 9 of the Specification Update of i82583.
   14567 		 */
   14568 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14569 		str = "L0s and L1 are";
   14570 		break;
   14571 	default:
   14572 		return;
   14573 	}
   14574 
   14575 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14576 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14577 	reg &= ~mask;
   14578 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14579 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14580 
   14581 	/* Print only in wm_attach() */
   14582 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14583 		aprint_verbose_dev(sc->sc_dev,
   14584 		    "ASPM %s disabled to workaround the errata.\n", str);
   14585 }
   14586 
   14587 /* LPLU */
   14588 
   14589 static void
   14590 wm_lplu_d0_disable(struct wm_softc *sc)
   14591 {
   14592 	struct mii_data *mii = &sc->sc_mii;
   14593 	uint32_t reg;
   14594 
   14595 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14596 		device_xname(sc->sc_dev), __func__));
   14597 
   14598 	if (sc->sc_phytype == WMPHY_IFE)
   14599 		return;
   14600 
   14601 	switch (sc->sc_type) {
   14602 	case WM_T_82571:
   14603 	case WM_T_82572:
   14604 	case WM_T_82573:
   14605 	case WM_T_82575:
   14606 	case WM_T_82576:
   14607 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14608 		reg &= ~PMR_D0_LPLU;
   14609 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14610 		break;
   14611 	case WM_T_82580:
   14612 	case WM_T_I350:
   14613 	case WM_T_I210:
   14614 	case WM_T_I211:
   14615 		reg = CSR_READ(sc, WMREG_PHPM);
   14616 		reg &= ~PHPM_D0A_LPLU;
   14617 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14618 		break;
   14619 	case WM_T_82574:
   14620 	case WM_T_82583:
   14621 	case WM_T_ICH8:
   14622 	case WM_T_ICH9:
   14623 	case WM_T_ICH10:
   14624 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14625 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14626 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14627 		CSR_WRITE_FLUSH(sc);
   14628 		break;
   14629 	case WM_T_PCH:
   14630 	case WM_T_PCH2:
   14631 	case WM_T_PCH_LPT:
   14632 	case WM_T_PCH_SPT:
   14633 	case WM_T_PCH_CNP:
   14634 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14635 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14636 		if (wm_phy_resetisblocked(sc) == false)
   14637 			reg |= HV_OEM_BITS_ANEGNOW;
   14638 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14639 		break;
   14640 	default:
   14641 		break;
   14642 	}
   14643 }
   14644 
   14645 /* EEE */
   14646 
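/*
 * Enable or disable EEE (Energy Efficient Ethernet) on I350-class chips:
 * when WM_F_EEE is set, advertise 1G/100M EEE auto-negotiation (IPCNFG)
 * and enable LPI in both directions with flow control on LPI (EEER);
 * otherwise clear those bits.
 */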
   14647 static void
   14648 wm_set_eee_i350(struct wm_softc *sc)
   14649 {
   14650 	uint32_t ipcnfg, eeer;
   14651 
   14652 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14653 	eeer = CSR_READ(sc, WMREG_EEER);
   14654 
   14655 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14656 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14657 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14658 		    | EEER_LPI_FC);
   14659 	} else {
   14660 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14661 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14662 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14663 		    | EEER_LPI_FC);
   14664 	}
   14665 
   14666 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14667 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14668 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14669 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14670 }
   14671 
   14672 /*
   14673  * Workarounds (mainly PHY related).
    14674  * Normally, PHY workarounds belong in the PHY drivers.
   14675  */
   14676 
    14677 /* Workaround for the 82566 Kumeran PCS lock loss */
   14678 static void
   14679 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14680 {
   14681 	struct mii_data *mii = &sc->sc_mii;
   14682 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14683 	int i;
   14684 	int reg;
   14685 
   14686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14687 		device_xname(sc->sc_dev), __func__));
   14688 
   14689 	/* If the link is not up, do nothing */
   14690 	if ((status & STATUS_LU) == 0)
   14691 		return;
   14692 
    14693 	/* Nothing to do if the link speed is other than 1Gbps */
   14694 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14695 		return;
   14696 
   14698 	for (i = 0; i < 10; i++) {
   14699 		/* read twice */
   14700 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14701 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14702 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14703 			goto out;	/* GOOD! */
   14704 
   14705 		/* Reset the PHY */
   14706 		wm_reset_phy(sc);
   14707 		delay(5*1000);
   14708 	}
   14709 
   14710 	/* Disable GigE link negotiation */
   14711 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14712 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14713 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14714 
   14715 	/*
   14716 	 * Call gig speed drop workaround on Gig disable before accessing
   14717 	 * any PHY registers.
   14718 	 */
   14719 	wm_gig_downshift_workaround_ich8lan(sc);
   14720 
   14721 out:
   14722 	return;
   14723 }
   14724 
   14725 /*
   14726  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   14727  *  @sc: pointer to the HW structure
   14728  *
    14729  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   14730  *  LPLU, Gig disable, MDIC PHY reset):
   14731  *    1) Set Kumeran Near-end loopback
   14732  *    2) Clear Kumeran Near-end loopback
    14733  *  Should only be called for ICH8[m] devices with any 1G PHY.
   14734  */
   14735 static void
   14736 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14737 {
   14738 	uint16_t kmreg;
   14739 
    14740 	/* Only for the IGP3 PHY */
   14741 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14742 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14743 			return;
   14744 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14745 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14746 			return;
   14747 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14748 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14749 	}
   14750 }
   14751 
   14752 /*
    14753  * Workaround for the PCH's PHYs
   14754  * XXX should be moved to new PHY driver?
   14755  */
   14756 static void
   14757 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14758 {
   14759 
   14760 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14761 		device_xname(sc->sc_dev), __func__));
   14762 	KASSERT(sc->sc_type == WM_T_PCH);
   14763 
   14764 	if (sc->sc_phytype == WMPHY_82577)
   14765 		wm_set_mdio_slow_mode_hv(sc);
   14766 
   14767 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14768 
    14769 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   14770 
   14771 	/* 82578 */
   14772 	if (sc->sc_phytype == WMPHY_82578) {
   14773 		struct mii_softc *child;
   14774 
   14775 		/*
   14776 		 * Return registers to default by doing a soft reset then
   14777 		 * writing 0x3140 to the control register
   14778 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14779 		 */
   14780 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14781 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14782 			PHY_RESET(child);
   14783 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14784 			    0x3140);
   14785 		}
   14786 	}
   14787 
   14788 	/* Select page 0 */
   14789 	sc->phy.acquire(sc);
   14790 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14791 	sc->phy.release(sc);
   14792 
   14793 	/*
    14794 	 * Configure the K1 Si workaround during PHY reset, assuming there
    14795 	 * is link, so that it disables K1 if the link is at 1Gbps.
   14796 	 */
   14797 	wm_k1_gig_workaround_hv(sc, 1);
   14798 }
   14799 
   14800 /*
    14801  *  wm_lv_phy_workaround_ich8lan - A series of PHY workarounds to be
   14802  *  done after every PHY reset.
   14803  */
   14804 static void
   14805 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14806 {
   14807 
   14808 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14809 		device_xname(sc->sc_dev), __func__));
   14810 	KASSERT(sc->sc_type == WM_T_PCH2);
   14811 
   14812 	/* Set MDIO slow mode before any other MDIO access */
   14813 	wm_set_mdio_slow_mode_hv(sc);
   14814 
   14815 	/* XXX set MSE higher to enable link to stay up when noise is high */
   14816 	/* XXX drop link after 5 times MSE threshold was reached */
   14817 }
   14818 
   14819 /**
    14820  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14821  *  @link: link up bool flag
   14822  *
   14823  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    14824  *  preventing further DMA write requests.  Work around the issue by disabling
    14825  *  the de-assertion of the clock request when in 1Gbps mode.
   14826  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14827  *  speeds in order to avoid Tx hangs.
   14828  **/
   14829 static int
   14830 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14831 {
   14832 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14833 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14834 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14835 	uint16_t phyreg;
   14836 
   14837 	if (link && (speed == STATUS_SPEED_1000)) {
   14838 		sc->phy.acquire(sc);
   14839 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14840 		    &phyreg);
   14841 		if (rv != 0)
   14842 			goto release;
   14843 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14844 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14845 		if (rv != 0)
   14846 			goto release;
   14847 		delay(20);
   14848 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14849 
   14850 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14851 		    &phyreg);
   14852 release:
   14853 		sc->phy.release(sc);
   14854 		return rv;
   14855 	}
   14856 
   14857 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14858 
   14859 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14860 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14861 	    || !link
   14862 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14863 		goto update_fextnvm6;
   14864 
   14865 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14866 
   14867 	/* Clear link status transmit timeout */
   14868 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14869 	if (speed == STATUS_SPEED_100) {
   14870 		/* Set inband Tx timeout to 5x10us for 100Half */
   14871 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14872 
   14873 		/* Do not extend the K1 entry latency for 100Half */
   14874 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14875 	} else {
   14876 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14877 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14878 
   14879 		/* Extend the K1 entry latency for 10 Mbps */
   14880 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14881 	}
   14882 
   14883 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14884 
   14885 update_fextnvm6:
   14886 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14887 	return 0;
   14888 }
   14889 
   14890 /*
   14891  *  wm_k1_gig_workaround_hv - K1 Si workaround
   14892  *  @sc:   pointer to the HW structure
   14893  *  @link: link up bool flag
   14894  *
   14895  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    14896  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   14897  *  If link is down, the function will restore the default K1 setting located
   14898  *  in the NVM.
   14899  */
   14900 static int
   14901 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14902 {
   14903 	int k1_enable = sc->sc_nvm_k1_enabled;
   14904 
   14905 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14906 		device_xname(sc->sc_dev), __func__));
   14907 
   14908 	if (sc->phy.acquire(sc) != 0)
   14909 		return -1;
   14910 
   14911 	if (link) {
   14912 		k1_enable = 0;
   14913 
   14914 		/* Link stall fix for link up */
   14915 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14916 		    0x0100);
   14917 	} else {
   14918 		/* Link stall fix for link down */
   14919 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14920 		    0x4100);
   14921 	}
   14922 
   14923 	wm_configure_k1_ich8lan(sc, k1_enable);
   14924 	sc->phy.release(sc);
   14925 
   14926 	return 0;
   14927 }
   14928 
   14929 /*
   14930  *  wm_k1_workaround_lv - K1 Si workaround
   14931  *  @sc:   pointer to the HW structure
   14932  *
    14933  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    14934  *  Disable K1 for 1000 and 100 speeds.
   14935  */
   14936 static int
   14937 wm_k1_workaround_lv(struct wm_softc *sc)
   14938 {
   14939 	uint32_t reg;
   14940 	int phyreg;
   14941 
   14942 	if (sc->sc_type != WM_T_PCH2)
   14943 		return 0;
   14944 
   14945 	/* Set K1 beacon duration based on 10Mbps speed */
   14946 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS);
   14947 
   14948 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   14949 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   14950 		if (phyreg &
   14951 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    14952 			/* LV 1G/100 packet drop issue workaround */
   14953 			phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL);
   14954 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   14955 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg);
   14956 		} else {
   14957 			/* For 10Mbps */
   14958 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   14959 			reg &= ~FEXTNVM4_BEACON_DURATION;
   14960 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   14961 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   14962 		}
   14963 	}
   14964 
   14965 	return 0;
   14966 }
   14967 
   14968 /*
   14969  *  wm_link_stall_workaround_hv - Si workaround
   14970  *  @sc: pointer to the HW structure
   14971  *
   14972  *  This function works around a Si bug where the link partner can get
   14973  *  a link up indication before the PHY does. If small packets are sent
   14974  *  by the link partner they can be placed in the packet buffer without
   14975  *  being properly accounted for by the PHY and will stall preventing
   14976  *  further packets from being received.  The workaround is to clear the
   14977  *  packet buffer after the PHY detects link up.
   14978  */
   14979 static int
   14980 wm_link_stall_workaround_hv(struct wm_softc *sc)
   14981 {
   14982 	int phyreg;
   14983 
   14984 	if (sc->sc_phytype != WMPHY_82578)
   14985 		return 0;
   14986 
    14987 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
    14988 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR);
   14989 	if ((phyreg & BMCR_LOOP) != 0)
   14990 		return 0;
   14991 
    14992 	/* Check if the link is up and at 1Gbps */
   14993 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS);
   14994 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14995 	    | BM_CS_STATUS_SPEED_MASK;
   14996 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14997 		| BM_CS_STATUS_SPEED_1000))
   14998 		return 0;
   14999 
   15000 	delay(200 * 1000);	/* XXX too big */
   15001 
    15002 	/* Flush the packets in the FIFO buffer */
   15003 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15004 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15005 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15006 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15007 
   15008 	return 0;
   15009 }
   15010 
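/*
 * Set the HV (82577/82578-class) PHY's MDIO slow mode bit.  As the PHY
 * ID probe in wm_phy_is_accessible_pchlan() below shows, some of these
 * PHYs respond reliably only at the slower MDIO timing.
 */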
   15011 static void
   15012 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15013 {
   15014 	uint32_t reg;
   15015 
   15016 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   15017 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15018 	    reg | HV_KMRN_MDIO_SLOW);
   15019 }
   15020 
   15021 /*
   15022  *  wm_configure_k1_ich8lan - Configure K1 power state
   15023  *  @sc: pointer to the HW structure
   15024  *  @enable: K1 state to configure
   15025  *
   15026  *  Configure the K1 power state based on the provided parameter.
   15027  *  Assumes semaphore already acquired.
   15028  */
   15029 static void
   15030 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15031 {
   15032 	uint32_t ctrl, ctrl_ext, tmp;
   15033 	uint16_t kmreg;
   15034 	int rv;
   15035 
   15036 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15037 
   15038 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15039 	if (rv != 0)
   15040 		return;
   15041 
   15042 	if (k1_enable)
   15043 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15044 	else
   15045 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15046 
   15047 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15048 	if (rv != 0)
   15049 		return;
   15050 
   15051 	delay(20);
   15052 
   15053 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15054 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15055 
   15056 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15057 	tmp |= CTRL_FRCSPD;
   15058 
   15059 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15060 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15061 	CSR_WRITE_FLUSH(sc);
   15062 	delay(20);
   15063 
   15064 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15065 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15066 	CSR_WRITE_FLUSH(sc);
   15067 	delay(20);
   15070 }
   15071 
    15072 /* Special case - the 82575 needs manual init ... */
   15073 static void
   15074 wm_reset_init_script_82575(struct wm_softc *sc)
   15075 {
   15076 	/*
    15077 	 * Remark: this is untested code - we have no board without EEPROM.
    15078 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15079 	 */
   15080 
   15081 	/* SerDes configuration via SERDESCTRL */
   15082 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15083 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15084 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15085 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15086 
   15087 	/* CCM configuration via CCMCTL register */
   15088 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15089 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15090 
   15091 	/* PCIe lanes configuration */
   15092 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15093 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15094 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15095 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15096 
   15097 	/* PCIe PLL Configuration */
   15098 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15099 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15100 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15101 }
   15102 
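/*
 * Reprogram MDICNFG after a reset from the NVM's CFG3 word for this LAN
 * function; only needed on 82580 ports strapped for SGMII.
 */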
   15103 static void
   15104 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15105 {
   15106 	uint32_t reg;
   15107 	uint16_t nvmword;
   15108 	int rv;
   15109 
   15110 	if (sc->sc_type != WM_T_82580)
   15111 		return;
   15112 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15113 		return;
   15114 
   15115 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15116 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15117 	if (rv != 0) {
   15118 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15119 		    __func__);
   15120 		return;
   15121 	}
   15122 
   15123 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15124 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15125 		reg |= MDICNFG_DEST;
   15126 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15127 		reg |= MDICNFG_COM_MDIO;
   15128 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15129 }
   15130 
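/* A PHY ID register reading as all 0s or all 1s means no usable response */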
   15131 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15132 
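/*
 * Check that the PCH PHY answers MDIO accesses: read the PHY ID registers,
 * retry in MDIO slow mode on pre-LPT parts, and, once the PHY is known to
 * be reachable on LPT and newer, un-force SMBus mode if ME is not active.
 */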
   15133 static bool
   15134 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15135 {
   15136 	uint32_t reg;
   15137 	uint16_t id1, id2;
   15138 	int i, rv;
   15139 
   15140 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15141 		device_xname(sc->sc_dev), __func__));
   15142 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15143 
   15144 	id1 = id2 = 0xffff;
   15145 	for (i = 0; i < 2; i++) {
   15146 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15147 		    &id1);
   15148 		if ((rv != 0) || MII_INVALIDID(id1))
   15149 			continue;
   15150 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15151 		    &id2);
   15152 		if ((rv != 0) || MII_INVALIDID(id2))
   15153 			continue;
   15154 		break;
   15155 	}
   15156 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15157 		goto out;
   15158 
   15159 	/*
    15160 	 * In case the PHY needs to be in MDIO slow mode,
    15161 	 * set slow mode and try to get the PHY ID again.
   15162 	 */
   15163 	if (sc->sc_type < WM_T_PCH_LPT) {
   15164 		sc->phy.release(sc);
   15165 		wm_set_mdio_slow_mode_hv(sc);
   15166 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   15167 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   15168 		sc->phy.acquire(sc);
   15169 	}
   15170 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    15171 		device_printf(sc->sc_dev, "%s: PHY is inaccessible\n", __func__);
   15172 		return false;
   15173 	}
   15174 out:
   15175 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15176 		/* Only unforce SMBus if ME is not active */
   15177 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15178 			uint16_t phyreg;
   15179 
   15180 			/* Unforce SMBus mode in PHY */
   15181 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15182 			    CV_SMB_CTRL, &phyreg);
   15183 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15184 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15185 			    CV_SMB_CTRL, phyreg);
   15186 
   15187 			/* Unforce SMBus mode in MAC */
   15188 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15189 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15190 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15191 		}
   15192 	}
   15193 	return true;
   15194 }
   15195 
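/*
 * Toggle the LANPHYPC pin value to power-cycle the MAC-PHY interconnect,
 * as other e1000-family drivers do: arm the PHY configuration counter,
 * drive the pin via CTRL, then wait for the PHY to come back up.
 */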
   15196 static void
   15197 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15198 {
   15199 	uint32_t reg;
   15200 	int i;
   15201 
   15202 	/* Set PHY Config Counter to 50msec */
   15203 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15204 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15205 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15206 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15207 
   15208 	/* Toggle LANPHYPC */
   15209 	reg = CSR_READ(sc, WMREG_CTRL);
   15210 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15211 	reg &= ~CTRL_LANPHYPC_VALUE;
   15212 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15213 	CSR_WRITE_FLUSH(sc);
   15214 	delay(1000);
   15215 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15216 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15217 	CSR_WRITE_FLUSH(sc);
   15218 
   15219 	if (sc->sc_type < WM_T_PCH_LPT)
   15220 		delay(50 * 1000);
   15221 	else {
   15222 		i = 20;
   15223 
   15224 		do {
   15225 			delay(5 * 1000);
   15226 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15227 		    && i--);
   15228 
   15229 		delay(30 * 1000);
   15230 	}
   15231 }
   15232 
   15233 static int
   15234 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15235 {
   15236 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15237 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15238 	uint32_t rxa;
   15239 	uint16_t scale = 0, lat_enc = 0;
   15240 	int32_t obff_hwm = 0;
   15241 	int64_t lat_ns, value;
   15242 
   15243 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15244 		device_xname(sc->sc_dev), __func__));
   15245 
   15246 	if (link) {
   15247 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15248 		uint32_t status;
   15249 		uint16_t speed;
   15250 		pcireg_t preg;
   15251 
   15252 		status = CSR_READ(sc, WMREG_STATUS);
   15253 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15254 		case STATUS_SPEED_10:
   15255 			speed = 10;
   15256 			break;
   15257 		case STATUS_SPEED_100:
   15258 			speed = 100;
   15259 			break;
   15260 		case STATUS_SPEED_1000:
   15261 			speed = 1000;
   15262 			break;
   15263 		default:
   15264 			device_printf(sc->sc_dev, "Unknown speed "
   15265 			    "(status = %08x)\n", status);
   15266 			return -1;
   15267 		}
   15268 
   15269 		/* Rx Packet Buffer Allocation size (KB) */
   15270 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15271 
   15272 		/*
   15273 		 * Determine the maximum latency tolerated by the device.
   15274 		 *
   15275 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15276 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15277 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15278 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15279 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15280 		 */
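		/*
		 * Worked example with illustrative numbers, assuming
		 * LTRV_VALUE is the 10-bit maximum (1023): lat_ns = 50000
		 * leaves the encoding loop below with value = 49 and
		 * scale = 2, i.e. 49 * 2^10 ns = 50176 ns, the smallest
		 * representable latency >= 50000 ns.
		 */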
   15281 		lat_ns = ((int64_t)rxa * 1024 -
   15282 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15283 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15284 		if (lat_ns < 0)
   15285 			lat_ns = 0;
   15286 		else
   15287 			lat_ns /= speed;
   15288 		value = lat_ns;
   15289 
   15290 		while (value > LTRV_VALUE) {
    15291 			scale++;
   15292 			value = howmany(value, __BIT(5));
   15293 		}
   15294 		if (scale > LTRV_SCALE_MAX) {
    15295 			device_printf(sc->sc_dev,
    15296 			    "Invalid LTR latency scale %d\n", scale);
   15297 			return -1;
   15298 		}
   15299 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15300 
   15301 		/* Determine the maximum latency tolerated by the platform */
   15302 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15303 		    WM_PCI_LTR_CAP_LPT);
   15304 		max_snoop = preg & 0xffff;
   15305 		max_nosnoop = preg >> 16;
   15306 
   15307 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15308 
   15309 		if (lat_enc > max_ltr_enc) {
   15310 			lat_enc = max_ltr_enc;
   15311 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15312 			    * PCI_LTR_SCALETONS(
   15313 				    __SHIFTOUT(lat_enc,
   15314 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15315 		}
   15316 
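		/*
		 * Convert the tolerated latency into the amount of Rx
		 * buffer (in KB) that fills during that time at the
		 * current link speed; what remains of rxa becomes the
		 * OBFF high water mark.
		 */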
   15317 		if (lat_ns) {
   15318 			lat_ns *= speed * 1000;
   15319 			lat_ns /= 8;
   15320 			lat_ns /= 1000000000;
   15321 			obff_hwm = (int32_t)(rxa - lat_ns);
   15322 		}
   15323 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   15324 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    15325 			    " (rxa = %d, lat_ns = %d)\n",
   15326 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15327 			return -1;
   15328 		}
   15329 	}
    15330 	/* Snoop and No-Snoop latencies are set the same */
   15331 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15332 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15333 
   15334 	/* Set OBFF high water mark */
   15335 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15336 	reg |= obff_hwm;
   15337 	CSR_WRITE(sc, WMREG_SVT, reg);
   15338 
   15339 	/* Enable OBFF */
   15340 	reg = CSR_READ(sc, WMREG_SVCR);
   15341 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15342 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15343 
   15344 	return 0;
   15345 }
   15346 
   15347 /*
   15348  * I210 Errata 25 and I211 Errata 10
   15349  * Slow System Clock.
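 *
 * If the internal PHY's PLL comes up unconfigured, PHY register access is
 * unreliable.  The retry loop below resets the PHY and bounces the device
 * through D3hot with a modified iNVM autoload word until the PLL reports
 * itself configured.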
   15350  */
   15351 static void
   15352 wm_pll_workaround_i210(struct wm_softc *sc)
   15353 {
   15354 	uint32_t mdicnfg, wuc;
   15355 	uint32_t reg;
   15356 	pcireg_t pcireg;
   15357 	uint32_t pmreg;
   15358 	uint16_t nvmword, tmp_nvmword;
   15359 	int phyval;
   15360 	bool wa_done = false;
   15361 	int i;
   15362 
   15363 	/* Save WUC and MDICNFG registers */
   15364 	wuc = CSR_READ(sc, WMREG_WUC);
   15365 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15366 
   15367 	reg = mdicnfg & ~MDICNFG_DEST;
   15368 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15369 
   15370 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15371 		nvmword = INVM_DEFAULT_AL;
   15372 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15373 
   15374 	/* Get Power Management cap offset */
   15375 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15376 		&pmreg, NULL) == 0)
   15377 		return;
   15378 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15379 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15380 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15381 
   15382 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15383 			break; /* OK */
   15384 		}
   15385 
   15386 		wa_done = true;
   15387 		/* Directly reset the internal PHY */
   15388 		reg = CSR_READ(sc, WMREG_CTRL);
   15389 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15390 
   15391 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15392 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15393 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15394 
   15395 		CSR_WRITE(sc, WMREG_WUC, 0);
   15396 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15397 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15398 
   15399 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15400 		    pmreg + PCI_PMCSR);
   15401 		pcireg |= PCI_PMCSR_STATE_D3;
   15402 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15403 		    pmreg + PCI_PMCSR, pcireg);
   15404 		delay(1000);
   15405 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15406 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15407 		    pmreg + PCI_PMCSR, pcireg);
   15408 
   15409 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15410 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15411 
   15412 		/* Restore WUC register */
   15413 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15414 	}
   15415 
   15416 	/* Restore MDICNFG setting */
   15417 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15418 	if (wa_done)
   15419 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15420 }
   15421 
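/*
 * Quirk for SPT/CNP when running with a legacy (INTx) interrupt, per the
 * function name: ungate the side clock and disable IOSF sideband clock
 * gating and clock requests via FEXTNVM7/FEXTNVM9.
 */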
   15422 static void
   15423 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15424 {
   15425 	uint32_t reg;
   15426 
   15427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15428 		device_xname(sc->sc_dev), __func__));
   15429 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15430 	    || (sc->sc_type == WM_T_PCH_CNP));
   15431 
   15432 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15433 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15434 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15435 
   15436 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15437 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15438 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15439 }
   15440