      1 /*	$NetBSD: if_wm.c,v 1.612 2018/12/30 04:18:09 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy-Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.612 2018/12/30 04:18:09 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
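
/*
 * DPRINTF usage sketch: the second argument is a parenthesized printf(9)
 * argument list, so a call looks like
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */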
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
    173  * The maximum number of interrupts this device driver uses.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames. If an mbuf chain has more than 64 DMA
    203  * segments, m_defrag() is called to reduce it (see the sketch below).
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
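
/*
 * Minimal sketch of the m_defrag() fallback described above (hypothetical
 * helper, kept disabled; the real Tx path does the equivalent inline):
 * when an mbuf chain needs more than WM_NTXSEGS DMA segments,
 * bus_dmamap_load_mbuf() fails with EFBIG, so the chain is compacted
 * with m_defrag() and the load is retried once.
 */
#if 0
static int
wm_load_txbuf_sketch(bus_dma_tag_t dmat, bus_dmamap_t dmamap,
    struct mbuf **mp)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf(dmat, dmamap, *mp, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_defrag(*mp, M_NOWAIT);
		if (m == NULL)
			return ENOBUFS;	/* caller drops the packet */
		*mp = m;
		error = bus_dmamap_load_mbuf(dmat, dmamap, *mp,
		    BUS_DMA_NOWAIT);
	}
	return error;
}
#endif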
    230 
    231 /*
    232  * Receive descriptor list size.  We have one Rx buffer per normal-sized
    233  * packet.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
    234  * We allocate 256 receive descriptors, each with a 2k (MCLBYTES)
    235  * buffer, giving room for about 50 jumbo packets (worked out below).
    236  */
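
/*
 * Worked sizing example for the comment above (assuming a ~9k jumbo
 * frame): ceil(9000 / 2048) = 5 buffers per jumbo packet, and
 * 256 descriptors / 5 = ~51, i.e. room for about 50 full-sized jumbo
 * packets in the ring.
 */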
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
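
/*
 * Expansion sketch for the event counter macros above:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the "##" sequences are not pasted inside a string literal, so the
 * array is simply sized to hold names like "txq00txdw"), and
 * WM_Q_MISC_EVCNT_ATTACH(txq, txdw, q, 0, xname) formats that name with
 * snprintf() and attaches the counter via evcnt_attach_dynamic().
 */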
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
    335 	size_t txq_descsize;		/* size of one Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    361 	 * CPUs. This queue mediates between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
    366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
    417 	size_t rxq_descsize;		/* size of one Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_flowflags;		/* 802.3x flow control flags */
    518 	int sc_align_tweak;
    519 
    520 	void *sc_ihs[WM_MAX_NINTR];	/*
    521 					 * interrupt cookie.
    522 					 * - legacy and msi use sc_ihs[0] only
    523 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    524 					 */
    525 	pci_intr_handle_t *sc_intrs;	/*
    526 					 * legacy and msi use sc_intrs[0] only
    527 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    528 					 */
    529 	int sc_nintrs;			/* number of interrupts */
    530 
    531 	int sc_link_intr_idx;		/* index of MSI-X tables */
    532 
    533 	callout_t sc_tick_ch;		/* tick callout */
    534 	bool sc_core_stopping;
    535 
    536 	int sc_nvm_ver_major;
    537 	int sc_nvm_ver_minor;
    538 	int sc_nvm_ver_build;
    539 	int sc_nvm_addrbits;		/* NVM address bits */
    540 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    541 	int sc_ich8_flash_base;
    542 	int sc_ich8_flash_bank_size;
    543 	int sc_nvm_k1_enabled;
    544 
    545 	int sc_nqueues;
    546 	struct wm_queue *sc_queue;
    547 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    548 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    549 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    550 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    551 
    552 	int sc_affinity_offset;
    553 
    554 #ifdef WM_EVENT_COUNTERS
    555 	/* Event counters. */
    556 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    557 
    558 	/* WM_T_82542_2_1 only */
    559 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    560 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    561 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    564 #endif /* WM_EVENT_COUNTERS */
    565 
    566 	/* This variable is used only on the 82547. */
    567 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    568 
    569 	uint32_t sc_ctrl;		/* prototype CTRL register */
    570 #if 0
    571 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    572 #endif
    573 	uint32_t sc_icr;		/* prototype interrupt bits */
    574 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    575 	uint32_t sc_tctl;		/* prototype TCTL register */
    576 	uint32_t sc_rctl;		/* prototype RCTL register */
    577 	uint32_t sc_txcw;		/* prototype TXCW register */
    578 	uint32_t sc_tipg;		/* prototype TIPG register */
    579 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    580 	uint32_t sc_pba;		/* prototype PBA register */
    581 
    582 	int sc_tbi_linkup;		/* TBI link status */
    583 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    584 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    585 
    586 	int sc_mchash_type;		/* multicast filter offset */
    587 
    588 	krndsource_t rnd_source;	/* random source */
    589 
    590 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    591 
    592 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    593 	kmutex_t *sc_ich_phymtx;	/*
    594 					 * 82574/82583/ICH/PCH specific PHY
    595 					 * mutex. For 82574/82583, the mutex
    596 					 * is used for both PHY and NVM.
    597 					 */
    598 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    599 
    600 	struct wm_phyop phy;
    601 	struct wm_nvmop nvm;
    602 };
    603 
    604 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    605 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    606 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
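
/*
 * Usage sketch for the core lock macros above (hypothetical helper,
 * kept disabled): sc_core_lock is NULL when the driver runs without
 * NET_MPSAFE, which is why all three macros tolerate a missing mutex.
 */
#if 0
static void
wm_core_lock_sketch(struct wm_softc *sc)
{

	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state here ... */
	WM_CORE_UNLOCK(sc);
}
#endif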
    607 
    608 #define	WM_RXCHAIN_RESET(rxq)						\
    609 do {									\
    610 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    611 	*(rxq)->rxq_tailp = NULL;					\
    612 	(rxq)->rxq_len = 0;						\
    613 } while (/*CONSTCOND*/0)
    614 
    615 #define	WM_RXCHAIN_LINK(rxq, m)						\
    616 do {									\
    617 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    618 	(rxq)->rxq_tailp = &(m)->m_next;				\
    619 } while (/*CONSTCOND*/0)
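
/*
 * Usage sketch for the Rx chain macros above (hypothetical helper,
 * kept disabled): each buffer of a multi-buffer packet is appended
 * with WM_RXCHAIN_LINK(); once the assembled chain at rxq->rxq_head
 * has been handed up (or discarded), WM_RXCHAIN_RESET() starts a new
 * chain.
 */
#if 0
static void
wm_rxchain_sketch(struct wm_rxqueue *rxq, struct mbuf *m, int len)
{

	m->m_len = len;
	WM_RXCHAIN_LINK(rxq, m);	/* append m, advance the tail pointer */
	rxq->rxq_len += len;

	/* After the last buffer, pass rxq->rxq_head up the stack, then: */
	WM_RXCHAIN_RESET(rxq);
}
#endif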
    620 
    621 #ifdef WM_EVENT_COUNTERS
    622 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    623 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)			\
    626 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    627 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    628 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    629 #else /* !WM_EVENT_COUNTERS */
    630 #define	WM_EVCNT_INCR(ev)	/* nothing */
    631 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    632 
    633 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    634 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    635 #endif /* !WM_EVENT_COUNTERS */
    636 
    637 #define	CSR_READ(sc, reg)						\
    638 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    639 #define	CSR_WRITE(sc, reg, val)						\
    640 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    641 #define	CSR_WRITE_FLUSH(sc)						\
    642 	(void) CSR_READ((sc), WMREG_STATUS)
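
/*
 * Usage sketch (hypothetical helper, kept disabled): the common
 * read-modify-write pattern with the CSR accessors above.
 * CSR_WRITE_FLUSH() reads STATUS to push posted writes out to the
 * device before any delay or dependent access.
 */
#if 0
static void
wm_csr_setbits_sketch(struct wm_softc *sc, uint32_t reg, uint32_t bits)
{
	uint32_t val;

	val = CSR_READ(sc, reg);
	CSR_WRITE(sc, reg, val | bits);
	CSR_WRITE_FLUSH(sc);
}
#endif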
    643 
    644 #define ICH8_FLASH_READ32(sc, reg)					\
    645 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    646 	    (reg) + sc->sc_flashreg_offset)
    647 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    648 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    649 	    (reg) + sc->sc_flashreg_offset, (data))
    650 
    651 #define ICH8_FLASH_READ16(sc, reg)					\
    652 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset)
    654 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    655 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    656 	    (reg) + sc->sc_flashreg_offset, (data))
    657 
    658 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    659 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    660 
    661 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    662 #define	WM_CDTXADDR_HI(txq, x)						\
    663 	(sizeof(bus_addr_t) == 8 ?					\
    664 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    665 
    666 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    667 #define	WM_CDRXADDR_HI(rxq, x)						\
    668 	(sizeof(bus_addr_t) == 8 ?					\
    669 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
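
/*
 * Worked example for the LO/HI split above: with a 64-bit bus_addr_t
 * of 0x0000000123456000, WM_CDTXADDR_LO() yields 0x23456000 and
 * WM_CDTXADDR_HI() yields 0x00000001; with a 32-bit bus_addr_t the
 * high half is always 0, as the descriptor layout expects.
 */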
    670 
    671 /*
    672  * Register read/write functions
    673  * other than CSR_{READ|WRITE}().
    674  */
    675 #if 0
    676 static inline uint32_t wm_io_read(struct wm_softc *, int);
    677 #endif
    678 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    679 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    680     uint32_t, uint32_t);
    681 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    682 
    683 /*
    684  * Descriptor sync/init functions.
    685  */
    686 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    687 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    688 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    689 
    690 /*
    691  * Device driver interface functions and commonly used functions:
    692  * match, attach, detach, init, start, stop, ioctl, watchdog, and so on.
    693  */
    694 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    695 static int	wm_match(device_t, cfdata_t, void *);
    696 static void	wm_attach(device_t, device_t, void *);
    697 static int	wm_detach(device_t, int);
    698 static bool	wm_suspend(device_t, const pmf_qual_t *);
    699 static bool	wm_resume(device_t, const pmf_qual_t *);
    700 static void	wm_watchdog(struct ifnet *);
    701 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    702     uint16_t *);
    703 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    704     uint16_t *);
    705 static void	wm_tick(void *);
    706 static int	wm_ifflags_cb(struct ethercom *);
    707 static int	wm_ioctl(struct ifnet *, u_long, void *);
    708 /* MAC address related */
    709 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    710 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    711 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    712 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    713 static int	wm_rar_count(struct wm_softc *);
    714 static void	wm_set_filter(struct wm_softc *);
    715 /* Reset and init related */
    716 static void	wm_set_vlan(struct wm_softc *);
    717 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    718 static void	wm_get_auto_rd_done(struct wm_softc *);
    719 static void	wm_lan_init_done(struct wm_softc *);
    720 static void	wm_get_cfg_done(struct wm_softc *);
    721 static void	wm_phy_post_reset(struct wm_softc *);
    722 static int	wm_write_smbus_addr(struct wm_softc *);
    723 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    724 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    725 static void	wm_initialize_hardware_bits(struct wm_softc *);
    726 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    727 static int	wm_reset_phy(struct wm_softc *);
    728 static void	wm_flush_desc_rings(struct wm_softc *);
    729 static void	wm_reset(struct wm_softc *);
    730 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    731 static void	wm_rxdrain(struct wm_rxqueue *);
    732 static void	wm_init_rss(struct wm_softc *);
    733 static void	wm_adjust_qnum(struct wm_softc *, int);
    734 static inline bool	wm_is_using_msix(struct wm_softc *);
    735 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    736 static int	wm_softint_establish(struct wm_softc *, int, int);
    737 static int	wm_setup_legacy(struct wm_softc *);
    738 static int	wm_setup_msix(struct wm_softc *);
    739 static int	wm_init(struct ifnet *);
    740 static int	wm_init_locked(struct ifnet *);
    741 static void	wm_unset_stopping_flags(struct wm_softc *);
    742 static void	wm_set_stopping_flags(struct wm_softc *);
    743 static void	wm_stop(struct ifnet *, int);
    744 static void	wm_stop_locked(struct ifnet *, int);
    745 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    746 static void	wm_82547_txfifo_stall(void *);
    747 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    748 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    749 /* DMA related */
    750 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    752 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    753 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    754     struct wm_txqueue *);
    755 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    756 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    757 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    758     struct wm_rxqueue *);
    759 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    761 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    762 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    763 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    764 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    765 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    766     struct wm_txqueue *);
    767 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    768     struct wm_rxqueue *);
    769 static int	wm_alloc_txrx_queues(struct wm_softc *);
    770 static void	wm_free_txrx_queues(struct wm_softc *);
    771 static int	wm_init_txrx_queues(struct wm_softc *);
    772 /* Start */
    773 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    774     struct wm_txsoft *, uint32_t *, uint8_t *);
    775 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    776 static void	wm_start(struct ifnet *);
    777 static void	wm_start_locked(struct ifnet *);
    778 static int	wm_transmit(struct ifnet *, struct mbuf *);
    779 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    780 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    781     bool);
    782 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    783     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    784 static void	wm_nq_start(struct ifnet *);
    785 static void	wm_nq_start_locked(struct ifnet *);
    786 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    787 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    788 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    789     bool);
    790 static void	wm_deferred_start_locked(struct wm_txqueue *);
    791 static void	wm_handle_queue(void *);
    792 /* Interrupt */
    793 static bool	wm_txeof(struct wm_txqueue *, u_int);
    794 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    795 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    796 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    797 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    798 static void	wm_linkintr(struct wm_softc *, uint32_t);
    799 static int	wm_intr_legacy(void *);
    800 static inline void	wm_txrxintr_disable(struct wm_queue *);
    801 static inline void	wm_txrxintr_enable(struct wm_queue *);
    802 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    803 static int	wm_txrxintr_msix(void *);
    804 static int	wm_linkintr_msix(void *);
    805 
    806 /*
    807  * Media related.
    808  * GMII, SGMII, TBI, SERDES and SFP.
    809  */
    810 /* Common */
    811 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    812 /* GMII related */
    813 static void	wm_gmii_reset(struct wm_softc *);
    814 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    815 static int	wm_get_phy_id_82575(struct wm_softc *);
    816 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    817 static int	wm_gmii_mediachange(struct ifnet *);
    818 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    819 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    820 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    821 static int	wm_gmii_i82543_readreg(device_t, int, int);
    822 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    823 static int	wm_gmii_mdic_readreg(device_t, int, int);
    824 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    825 static int	wm_gmii_i82544_readreg(device_t, int, int);
    826 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    827 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    828 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    829 static int	wm_gmii_i80003_readreg(device_t, int, int);
    830 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    831 static int	wm_gmii_bm_readreg(device_t, int, int);
    832 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    833 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    834 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    835 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    836 	bool);
    837 static int	wm_gmii_hv_readreg(device_t, int, int);
    838 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    839 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    840 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    841 static int	wm_gmii_82580_readreg(device_t, int, int);
    842 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    843 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    844 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    845 static void	wm_gmii_statchg(struct ifnet *);
    846 /*
    847  * Kumeran related (80003, ICH* and PCH*).
    848  * These functions are not for accessing MII registers but for accessing
    849  * Kumeran-specific registers.
    850  */
    851 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    852 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    853 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    854 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    855 /* SGMII */
    856 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    857 static int	wm_sgmii_readreg(device_t, int, int);
    858 static void	wm_sgmii_writereg(device_t, int, int, int);
    859 /* TBI related */
    860 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    861 static void	wm_tbi_mediainit(struct wm_softc *);
    862 static int	wm_tbi_mediachange(struct ifnet *);
    863 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    864 static int	wm_check_for_link(struct wm_softc *);
    865 static void	wm_tbi_tick(struct wm_softc *);
    866 /* SERDES related */
    867 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    868 static int	wm_serdes_mediachange(struct ifnet *);
    869 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    870 static void	wm_serdes_tick(struct wm_softc *);
    871 /* SFP related */
    872 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    873 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    874 
    875 /*
    876  * NVM related.
    877  * Microwire, SPI (with/without EERD) and Flash.
    878  */
    879 /* Misc functions */
    880 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    881 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    882 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    883 /* Microwire */
    884 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    885 /* SPI */
    886 static int	wm_nvm_ready_spi(struct wm_softc *);
    887 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    888 /* Used with EERD */
    889 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    890 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    891 /* Flash */
    892 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    893     unsigned int *);
    894 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    895 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    896 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    897     uint32_t *);
    898 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    899 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    900 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    901 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    902 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    903 /* iNVM */
    904 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    905 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    906 /* Lock, detecting NVM type, validate checksum and read */
    907 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    908 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    909 static int	wm_nvm_validate_checksum(struct wm_softc *);
    910 static void	wm_nvm_version_invm(struct wm_softc *);
    911 static void	wm_nvm_version(struct wm_softc *);
    912 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    913 
    914 /*
    915  * Hardware semaphores.
    916  * Very complex...
    917  */
    918 static int	wm_get_null(struct wm_softc *);
    919 static void	wm_put_null(struct wm_softc *);
    920 static int	wm_get_eecd(struct wm_softc *);
    921 static void	wm_put_eecd(struct wm_softc *);
    922 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    923 static void	wm_put_swsm_semaphore(struct wm_softc *);
    924 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    925 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    926 static int	wm_get_nvm_80003(struct wm_softc *);
    927 static void	wm_put_nvm_80003(struct wm_softc *);
    928 static int	wm_get_nvm_82571(struct wm_softc *);
    929 static void	wm_put_nvm_82571(struct wm_softc *);
    930 static int	wm_get_phy_82575(struct wm_softc *);
    931 static void	wm_put_phy_82575(struct wm_softc *);
    932 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    933 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    934 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    935 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    936 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    937 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    938 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    939 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    940 
    941 /*
    942  * Management mode and power management related subroutines.
    943  * BMC, AMT, suspend/resume and EEE.
    944  */
    945 #if 0
    946 static int	wm_check_mng_mode(struct wm_softc *);
    947 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    948 static int	wm_check_mng_mode_82574(struct wm_softc *);
    949 static int	wm_check_mng_mode_generic(struct wm_softc *);
    950 #endif
    951 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    952 static bool	wm_phy_resetisblocked(struct wm_softc *);
    953 static void	wm_get_hw_control(struct wm_softc *);
    954 static void	wm_release_hw_control(struct wm_softc *);
    955 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    956 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    957 static void	wm_init_manageability(struct wm_softc *);
    958 static void	wm_release_manageability(struct wm_softc *);
    959 static void	wm_get_wakeup(struct wm_softc *);
    960 static int	wm_ulp_disable(struct wm_softc *);
    961 static int	wm_enable_phy_wakeup(struct wm_softc *);
    962 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    963 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    964 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    965 static void	wm_enable_wakeup(struct wm_softc *);
    966 static void	wm_disable_aspm(struct wm_softc *);
    967 /* LPLU (Low Power Link Up) */
    968 static void	wm_lplu_d0_disable(struct wm_softc *);
    969 /* EEE */
    970 static void	wm_set_eee_i350(struct wm_softc *);
    971 
    972 /*
    973  * Workarounds (mainly PHY related).
    974  * Basically, PHY workarounds live in the PHY drivers.
    975  */
    976 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    977 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    978 static void	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    979 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    980 static void	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    981 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    982 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    983 static int	wm_k1_workaround_lv(struct wm_softc *);
    984 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    985 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    986 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    987 static void	wm_reset_init_script_82575(struct wm_softc *);
    988 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    989 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    990 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    991 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    992 static void	wm_pll_workaround_i210(struct wm_softc *);
    993 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    994 
    995 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    996     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    997 
    998 /*
    999  * Devices supported by this driver.
   1000  */
   1001 static const struct wm_product {
   1002 	pci_vendor_id_t		wmp_vendor;
   1003 	pci_product_id_t	wmp_product;
   1004 	const char		*wmp_name;
   1005 	wm_chip_type		wmp_type;
   1006 	uint32_t		wmp_flags;
   1007 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1008 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1009 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1010 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1011 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1012 } wm_products[] = {
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1014 	  "Intel i82542 1000BASE-X Ethernet",
   1015 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1016 
   1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1018 	  "Intel i82543GC 1000BASE-X Ethernet",
   1019 	  WM_T_82543,		WMP_F_FIBER },
   1020 
   1021 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1022 	  "Intel i82543GC 1000BASE-T Ethernet",
   1023 	  WM_T_82543,		WMP_F_COPPER },
   1024 
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1026 	  "Intel i82544EI 1000BASE-T Ethernet",
   1027 	  WM_T_82544,		WMP_F_COPPER },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1030 	  "Intel i82544EI 1000BASE-X Ethernet",
   1031 	  WM_T_82544,		WMP_F_FIBER },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1034 	  "Intel i82544GC 1000BASE-T Ethernet",
   1035 	  WM_T_82544,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1038 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1039 	  WM_T_82544,		WMP_F_COPPER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1042 	  "Intel i82540EM 1000BASE-T Ethernet",
   1043 	  WM_T_82540,		WMP_F_COPPER },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1046 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1047 	  WM_T_82540,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1050 	  "Intel i82540EP 1000BASE-T Ethernet",
   1051 	  WM_T_82540,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1054 	  "Intel i82540EP 1000BASE-T Ethernet",
   1055 	  WM_T_82540,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1058 	  "Intel i82540EP 1000BASE-T Ethernet",
   1059 	  WM_T_82540,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1062 	  "Intel i82545EM 1000BASE-T Ethernet",
   1063 	  WM_T_82545,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1066 	  "Intel i82545GM 1000BASE-T Ethernet",
   1067 	  WM_T_82545_3,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1070 	  "Intel i82545GM 1000BASE-X Ethernet",
   1071 	  WM_T_82545_3,		WMP_F_FIBER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1074 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1075 	  WM_T_82545_3,		WMP_F_SERDES },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1078 	  "Intel i82546EB 1000BASE-T Ethernet",
   1079 	  WM_T_82546,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1082 	  "Intel i82546EB 1000BASE-T Ethernet",
   1083 	  WM_T_82546,		WMP_F_COPPER },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1086 	  "Intel i82545EM 1000BASE-X Ethernet",
   1087 	  WM_T_82545,		WMP_F_FIBER },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1090 	  "Intel i82546EB 1000BASE-X Ethernet",
   1091 	  WM_T_82546,		WMP_F_FIBER },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1094 	  "Intel i82546GB 1000BASE-T Ethernet",
   1095 	  WM_T_82546_3,		WMP_F_COPPER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1098 	  "Intel i82546GB 1000BASE-X Ethernet",
   1099 	  WM_T_82546_3,		WMP_F_FIBER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1102 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1103 	  WM_T_82546_3,		WMP_F_SERDES },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1106 	  "i82546GB quad-port Gigabit Ethernet",
   1107 	  WM_T_82546_3,		WMP_F_COPPER },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1110 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1111 	  WM_T_82546_3,		WMP_F_COPPER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1114 	  "Intel PRO/1000MT (82546GB)",
   1115 	  WM_T_82546_3,		WMP_F_COPPER },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1118 	  "Intel i82541EI 1000BASE-T Ethernet",
   1119 	  WM_T_82541,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1122 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1123 	  WM_T_82541,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1126 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1127 	  WM_T_82541,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1130 	  "Intel i82541ER 1000BASE-T Ethernet",
   1131 	  WM_T_82541_2,		WMP_F_COPPER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1134 	  "Intel i82541GI 1000BASE-T Ethernet",
   1135 	  WM_T_82541_2,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1138 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1139 	  WM_T_82541_2,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1142 	  "Intel i82541PI 1000BASE-T Ethernet",
   1143 	  WM_T_82541_2,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1146 	  "Intel i82547EI 1000BASE-T Ethernet",
   1147 	  WM_T_82547,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1150 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1151 	  WM_T_82547,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1154 	  "Intel i82547GI 1000BASE-T Ethernet",
   1155 	  WM_T_82547_2,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1158 	  "Intel PRO/1000 PT (82571EB)",
   1159 	  WM_T_82571,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1162 	  "Intel PRO/1000 PF (82571EB)",
   1163 	  WM_T_82571,		WMP_F_FIBER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1166 	  "Intel PRO/1000 PB (82571EB)",
   1167 	  WM_T_82571,		WMP_F_SERDES },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1170 	  "Intel PRO/1000 QT (82571EB)",
   1171 	  WM_T_82571,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1174 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1175 	  WM_T_82571,		WMP_F_COPPER, },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1178 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1179 	  WM_T_82571,		WMP_F_COPPER, },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1182 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1183 	  WM_T_82571,		WMP_F_SERDES, },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1186 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1187 	  WM_T_82571,		WMP_F_SERDES, },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1190 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1191 	  WM_T_82571,		WMP_F_FIBER, },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1194 	  "Intel i82572EI 1000baseT Ethernet",
   1195 	  WM_T_82572,		WMP_F_COPPER },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1198 	  "Intel i82572EI 1000baseX Ethernet",
   1199 	  WM_T_82572,		WMP_F_FIBER },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1202 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1203 	  WM_T_82572,		WMP_F_SERDES },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1206 	  "Intel i82572EI 1000baseT Ethernet",
   1207 	  WM_T_82572,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1210 	  "Intel i82573E",
   1211 	  WM_T_82573,		WMP_F_COPPER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1214 	  "Intel i82573E IAMT",
   1215 	  WM_T_82573,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1218 	  "Intel i82573L Gigabit Ethernet",
   1219 	  WM_T_82573,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1222 	  "Intel i82574L",
   1223 	  WM_T_82574,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1226 	  "Intel i82574L",
   1227 	  WM_T_82574,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1230 	  "Intel i82583V",
   1231 	  WM_T_82583,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1234 	  "i80003 dual 1000baseT Ethernet",
   1235 	  WM_T_80003,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1238 	  "i80003 dual 1000baseX Ethernet",
   1239 	  WM_T_80003,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1242 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1243 	  WM_T_80003,		WMP_F_SERDES },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1246 	  "Intel i80003 1000baseT Ethernet",
   1247 	  WM_T_80003,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1250 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1251 	  WM_T_80003,		WMP_F_SERDES },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1254 	  "Intel i82801H (M_AMT) LAN Controller",
   1255 	  WM_T_ICH8,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1257 	  "Intel i82801H (AMT) LAN Controller",
   1258 	  WM_T_ICH8,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1260 	  "Intel i82801H LAN Controller",
   1261 	  WM_T_ICH8,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1263 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1264 	  WM_T_ICH8,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1266 	  "Intel i82801H (M) LAN Controller",
   1267 	  WM_T_ICH8,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1269 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1270 	  WM_T_ICH8,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1272 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1273 	  WM_T_ICH8,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1275 	  "82567V-3 LAN Controller",
   1276 	  WM_T_ICH8,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1278 	  "82801I (AMT) LAN Controller",
   1279 	  WM_T_ICH9,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1281 	  "82801I 10/100 LAN Controller",
   1282 	  WM_T_ICH9,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1284 	  "82801I (G) 10/100 LAN Controller",
   1285 	  WM_T_ICH9,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1287 	  "82801I (GT) 10/100 LAN Controller",
   1288 	  WM_T_ICH9,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1290 	  "82801I (C) LAN Controller",
   1291 	  WM_T_ICH9,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1293 	  "82801I mobile LAN Controller",
   1294 	  WM_T_ICH9,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1296 	  "82801I mobile (V) LAN Controller",
   1297 	  WM_T_ICH9,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1299 	  "82801I mobile (AMT) LAN Controller",
   1300 	  WM_T_ICH9,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1302 	  "82567LM-4 LAN Controller",
   1303 	  WM_T_ICH9,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1305 	  "82567LM-2 LAN Controller",
   1306 	  WM_T_ICH10,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1308 	  "82567LF-2 LAN Controller",
   1309 	  WM_T_ICH10,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1311 	  "82567LM-3 LAN Controller",
   1312 	  WM_T_ICH10,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1314 	  "82567LF-3 LAN Controller",
   1315 	  WM_T_ICH10,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1317 	  "82567V-2 LAN Controller",
   1318 	  WM_T_ICH10,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1320 	  "82567V-3? LAN Controller",
   1321 	  WM_T_ICH10,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1323 	  "HANKSVILLE LAN Controller",
   1324 	  WM_T_ICH10,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1326 	  "PCH LAN (82577LM) Controller",
   1327 	  WM_T_PCH,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1329 	  "PCH LAN (82577LC) Controller",
   1330 	  WM_T_PCH,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1332 	  "PCH LAN (82578DM) Controller",
   1333 	  WM_T_PCH,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1335 	  "PCH LAN (82578DC) Controller",
   1336 	  WM_T_PCH,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1338 	  "PCH2 LAN (82579LM) Controller",
   1339 	  WM_T_PCH2,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1341 	  "PCH2 LAN (82579V) Controller",
   1342 	  WM_T_PCH2,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1344 	  "82575EB dual-1000baseT Ethernet",
   1345 	  WM_T_82575,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1347 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1348 	  WM_T_82575,		WMP_F_SERDES },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1350 	  "82575GB quad-1000baseT Ethernet",
   1351 	  WM_T_82575,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1353 	  "82575GB quad-1000baseT Ethernet (PM)",
   1354 	  WM_T_82575,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1356 	  "82576 1000BaseT Ethernet",
   1357 	  WM_T_82576,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1359 	  "82576 1000BaseX Ethernet",
   1360 	  WM_T_82576,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1363 	  "82576 gigabit Ethernet (SERDES)",
   1364 	  WM_T_82576,		WMP_F_SERDES },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1367 	  "82576 quad-1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1371 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1372 	  WM_T_82576,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1375 	  "82576 gigabit Ethernet",
   1376 	  WM_T_82576,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1379 	  "82576 gigabit Ethernet (SERDES)",
   1380 	  WM_T_82576,		WMP_F_SERDES },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1382 	  "82576 quad-gigabit Ethernet (SERDES)",
   1383 	  WM_T_82576,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1386 	  "82580 1000BaseT Ethernet",
   1387 	  WM_T_82580,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1389 	  "82580 1000BaseX Ethernet",
   1390 	  WM_T_82580,		WMP_F_FIBER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1393 	  "82580 1000BaseT Ethernet (SERDES)",
   1394 	  WM_T_82580,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1397 	  "82580 gigabit Ethernet (SGMII)",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1400 	  "82580 dual-1000BaseT Ethernet",
   1401 	  WM_T_82580,		WMP_F_COPPER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1404 	  "82580 quad-1000BaseX Ethernet",
   1405 	  WM_T_82580,		WMP_F_FIBER },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1408 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1412 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1413 	  WM_T_82580,		WMP_F_SERDES },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1416 	  "DH89XXCC 1000BASE-KX Ethernet",
   1417 	  WM_T_82580,		WMP_F_SERDES },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1420 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1421 	  WM_T_82580,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1424 	  "I350 Gigabit Network Connection",
   1425 	  WM_T_I350,		WMP_F_COPPER },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1428 	  "I350 Gigabit Fiber Network Connection",
   1429 	  WM_T_I350,		WMP_F_FIBER },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1432 	  "I350 Gigabit Backplane Connection",
   1433 	  WM_T_I350,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1436 	  "I350 Quad Port Gigabit Ethernet",
   1437 	  WM_T_I350,		WMP_F_SERDES },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1440 	  "I350 Gigabit Connection",
   1441 	  WM_T_I350,		WMP_F_COPPER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1444 	  "I354 Gigabit Ethernet (KX)",
   1445 	  WM_T_I354,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1448 	  "I354 Gigabit Ethernet (SGMII)",
   1449 	  WM_T_I354,		WMP_F_COPPER },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1452 	  "I354 Gigabit Ethernet (2.5G)",
   1453 	  WM_T_I354,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1456 	  "I210-T1 Ethernet Server Adapter",
   1457 	  WM_T_I210,		WMP_F_COPPER },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1460 	  "I210 Ethernet (Copper OEM)",
   1461 	  WM_T_I210,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1464 	  "I210 Ethernet (Copper IT)",
   1465 	  WM_T_I210,		WMP_F_COPPER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1468 	  "I210 Ethernet (FLASH less)",
   1469 	  WM_T_I210,		WMP_F_COPPER },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1472 	  "I210 Gigabit Ethernet (Fiber)",
   1473 	  WM_T_I210,		WMP_F_FIBER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1476 	  "I210 Gigabit Ethernet (SERDES)",
   1477 	  WM_T_I210,		WMP_F_SERDES },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1480 	  "I210 Gigabit Ethernet (FLASH less)",
   1481 	  WM_T_I210,		WMP_F_SERDES },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1484 	  "I210 Gigabit Ethernet (SGMII)",
   1485 	  WM_T_I210,		WMP_F_COPPER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1488 	  "I211 Ethernet (COPPER)",
   1489 	  WM_T_I211,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1491 	  "I217 V Ethernet Connection",
   1492 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1494 	  "I217 LM Ethernet Connection",
   1495 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1497 	  "I218 V Ethernet Connection",
   1498 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1500 	  "I218 V Ethernet Connection",
   1501 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1503 	  "I218 V Ethernet Connection",
   1504 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1506 	  "I218 LM Ethernet Connection",
   1507 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1509 	  "I218 LM Ethernet Connection",
   1510 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1512 	  "I218 LM Ethernet Connection",
   1513 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1515 	  "I219 V Ethernet Connection",
   1516 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1518 	  "I219 V Ethernet Connection",
   1519 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1521 	  "I219 V Ethernet Connection",
   1522 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1524 	  "I219 V Ethernet Connection",
   1525 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1527 	  "I219 LM Ethernet Connection",
   1528 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1530 	  "I219 LM Ethernet Connection",
   1531 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1533 	  "I219 LM Ethernet Connection",
   1534 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1536 	  "I219 LM Ethernet Connection",
   1537 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1539 	  "I219 LM Ethernet Connection",
   1540 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1542 	  "I219 V Ethernet Connection",
   1543 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1545 	  "I219 V Ethernet Connection",
   1546 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1548 	  "I219 LM Ethernet Connection",
   1549 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1551 	  "I219 LM Ethernet Connection",
   1552 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1553 	{ 0,			0,
   1554 	  NULL,
   1555 	  0,			0 },
   1556 };
   1557 
   1558 /*
   1559  * Register read/write functions.
   1560  * Other than CSR_{READ|WRITE}().
   1561  */
   1562 
   1563 #if 0 /* Not currently used */
   1564 static inline uint32_t
   1565 wm_io_read(struct wm_softc *sc, int reg)
   1566 {
   1567 
   1568 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1569 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1570 }
   1571 #endif
   1572 
   1573 static inline void
   1574 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1575 {
   1576 
   1577 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1578 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1579 }
   1580 
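/*
 * A note on the two helpers above (descriptive only): the chip's
 * I/O-mapped register access is an indirect window of two 32-bit
 * registers.  The target register offset is first written at I/O
 * offset 0, and the data is then read or written at I/O offset 4.
 */
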
   1581 static inline void
   1582 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1583     uint32_t data)
   1584 {
   1585 	uint32_t regval;
   1586 	int i;
   1587 
   1588 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1589 
   1590 	CSR_WRITE(sc, reg, regval);
   1591 
   1592 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1593 		delay(5);
   1594 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1595 			break;
   1596 	}
   1597 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1598 		aprint_error("%s: WARNING:"
   1599 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1600 		    device_xname(sc->sc_dev), reg);
   1601 	}
   1602 }
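
/*
 * The polling loop above waits in 5us steps, so in the worst case the
 * "did not indicate ready" warning fires after roughly
 * SCTL_CTL_POLL_TIMEOUT * 5 microseconds without seeing the ready bit.
 */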
   1603 
   1604 static inline void
   1605 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1606 {
   1607 	wa->wa_low = htole32(v & 0xffffffffU);
   1608 	if (sizeof(bus_addr_t) == 8)
   1609 		wa->wa_high = htole32((uint64_t) v >> 32);
   1610 	else
   1611 		wa->wa_high = 0;
   1612 }
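
/*
 * Worked example (illustrative values only): for v == 0x123456789a
 * with a 64-bit bus_addr_t, wm_set_dma_addr() stores
 * wa_low == htole32(0x3456789a) and wa_high == htole32(0x12);
 * with a 32-bit bus_addr_t, wa_high is always 0.
 */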
   1613 
   1614 /*
   1615  * Descriptor sync/init functions.
   1616  */
   1617 static inline void
   1618 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1619 {
   1620 	struct wm_softc *sc = txq->txq_sc;
   1621 
   1622 	/* If it will wrap around, sync to the end of the ring. */
   1623 	if ((start + num) > WM_NTXDESC(txq)) {
   1624 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1625 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1626 		    (WM_NTXDESC(txq) - start), ops);
   1627 		num -= (WM_NTXDESC(txq) - start);
   1628 		start = 0;
   1629 	}
   1630 
   1631 	/* Now sync whatever is left. */
   1632 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1633 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1634 }
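
/*
 * Wrap-around example (hypothetical ring size): with WM_NTXDESC(txq)
 * == 256, wm_cdtxsync(txq, 252, 8, ops) issues two bus_dmamap_sync()
 * calls, one covering descriptors 252..255 and one covering 0..3.
 */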
   1635 
   1636 static inline void
   1637 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1638 {
   1639 	struct wm_softc *sc = rxq->rxq_sc;
   1640 
   1641 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1642 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1643 }
   1644 
   1645 static inline void
   1646 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1647 {
   1648 	struct wm_softc *sc = rxq->rxq_sc;
   1649 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1650 	struct mbuf *m = rxs->rxs_mbuf;
   1651 
   1652 	/*
   1653 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1654 	 * so that the payload after the Ethernet header is aligned
   1655 	 * to a 4-byte boundary.
    1656 	 *
   1657 	 * XXX BRAINDAMAGE ALERT!
   1658 	 * The stupid chip uses the same size for every buffer, which
   1659 	 * is set in the Receive Control register.  We are using the 2K
   1660 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1661 	 * reason, we can't "scoot" packets longer than the standard
   1662 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1663 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1664 	 * the upper layer copy the headers.
   1665 	 */
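	/*
	 * Worked example: with sc_align_tweak == 2, the 14-byte Ethernet
	 * header starts at buffer offset 2, so the IP header that follows
	 * it begins at offset 16, a 4-byte boundary.
	 */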
   1666 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1667 
   1668 	if (sc->sc_type == WM_T_82574) {
   1669 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1670 		rxd->erx_data.erxd_addr =
   1671 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1672 		rxd->erx_data.erxd_dd = 0;
   1673 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1674 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1675 
   1676 		rxd->nqrx_data.nrxd_paddr =
   1677 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1678 		/* Currently, split header is not supported. */
   1679 		rxd->nqrx_data.nrxd_haddr = 0;
   1680 	} else {
   1681 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1682 
   1683 		wm_set_dma_addr(&rxd->wrx_addr,
   1684 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1685 		rxd->wrx_len = 0;
   1686 		rxd->wrx_cksum = 0;
   1687 		rxd->wrx_status = 0;
   1688 		rxd->wrx_errors = 0;
   1689 		rxd->wrx_special = 0;
   1690 	}
   1691 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1692 
   1693 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1694 }
   1695 
   1696 /*
   1697  * Device driver interface functions and commonly used functions.
   1698  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1699  */
   1700 
   1701 /* Lookup supported device table */
   1702 static const struct wm_product *
   1703 wm_lookup(const struct pci_attach_args *pa)
   1704 {
   1705 	const struct wm_product *wmp;
   1706 
   1707 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1708 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1709 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1710 			return wmp;
   1711 	}
   1712 	return NULL;
   1713 }
   1714 
   1715 /* The match function (ca_match) */
   1716 static int
   1717 wm_match(device_t parent, cfdata_t cf, void *aux)
   1718 {
   1719 	struct pci_attach_args *pa = aux;
   1720 
   1721 	if (wm_lookup(pa) != NULL)
   1722 		return 1;
   1723 
   1724 	return 0;
   1725 }
   1726 
   1727 /* The attach function (ca_attach) */
   1728 static void
   1729 wm_attach(device_t parent, device_t self, void *aux)
   1730 {
   1731 	struct wm_softc *sc = device_private(self);
   1732 	struct pci_attach_args *pa = aux;
   1733 	prop_dictionary_t dict;
   1734 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1735 	pci_chipset_tag_t pc = pa->pa_pc;
   1736 	int counts[PCI_INTR_TYPE_SIZE];
   1737 	pci_intr_type_t max_type;
   1738 	const char *eetype, *xname;
   1739 	bus_space_tag_t memt;
   1740 	bus_space_handle_t memh;
   1741 	bus_size_t memsize;
   1742 	int memh_valid;
   1743 	int i, error;
   1744 	const struct wm_product *wmp;
   1745 	prop_data_t ea;
   1746 	prop_number_t pn;
   1747 	uint8_t enaddr[ETHER_ADDR_LEN];
   1748 	char buf[256];
   1749 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1750 	pcireg_t preg, memtype;
   1751 	uint16_t eeprom_data, apme_mask;
   1752 	bool force_clear_smbi;
   1753 	uint32_t link_mode;
   1754 	uint32_t reg;
   1755 
   1756 	sc->sc_dev = self;
   1757 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1758 	sc->sc_core_stopping = false;
   1759 
   1760 	wmp = wm_lookup(pa);
   1761 #ifdef DIAGNOSTIC
   1762 	if (wmp == NULL) {
   1763 		printf("\n");
   1764 		panic("wm_attach: impossible");
   1765 	}
   1766 #endif
   1767 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1768 
   1769 	sc->sc_pc = pa->pa_pc;
   1770 	sc->sc_pcitag = pa->pa_tag;
   1771 
   1772 	if (pci_dma64_available(pa))
   1773 		sc->sc_dmat = pa->pa_dmat64;
   1774 	else
   1775 		sc->sc_dmat = pa->pa_dmat;
   1776 
   1777 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1778 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1779 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1780 
   1781 	sc->sc_type = wmp->wmp_type;
   1782 
   1783 	/* Set default function pointers */
   1784 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1785 	sc->phy.release = sc->nvm.release = wm_put_null;
   1786 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1787 
   1788 	if (sc->sc_type < WM_T_82543) {
   1789 		if (sc->sc_rev < 2) {
   1790 			aprint_error_dev(sc->sc_dev,
   1791 			    "i82542 must be at least rev. 2\n");
   1792 			return;
   1793 		}
   1794 		if (sc->sc_rev < 3)
   1795 			sc->sc_type = WM_T_82542_2_0;
   1796 	}
   1797 
   1798 	/*
   1799 	 * Disable MSI for Errata:
   1800 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1801 	 *
   1802 	 *  82544: Errata 25
   1803 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1804 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1805 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1806 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1807 	 *
   1808 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1809 	 *
   1810 	 *  82571 & 82572: Errata 63
   1811 	 */
   1812 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1813 	    || (sc->sc_type == WM_T_82572))
   1814 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1815 
   1816 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1817 	    || (sc->sc_type == WM_T_82580)
   1818 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1819 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1820 		sc->sc_flags |= WM_F_NEWQUEUE;
   1821 
   1822 	/* Set device properties (mactype) */
   1823 	dict = device_properties(sc->sc_dev);
   1824 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1825 
   1826 	/*
    1827 	 * Map the device.  All devices support memory-mapped access,
   1828 	 * and it is really required for normal operation.
   1829 	 */
   1830 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1831 	switch (memtype) {
   1832 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1833 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1834 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1835 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1836 		break;
   1837 	default:
   1838 		memh_valid = 0;
   1839 		break;
   1840 	}
   1841 
   1842 	if (memh_valid) {
   1843 		sc->sc_st = memt;
   1844 		sc->sc_sh = memh;
   1845 		sc->sc_ss = memsize;
   1846 	} else {
   1847 		aprint_error_dev(sc->sc_dev,
   1848 		    "unable to map device registers\n");
   1849 		return;
   1850 	}
   1851 
   1852 	/*
   1853 	 * In addition, i82544 and later support I/O mapped indirect
   1854 	 * register access.  It is not desirable (nor supported in
   1855 	 * this driver) to use it for normal operation, though it is
   1856 	 * required to work around bugs in some chip versions.
   1857 	 */
   1858 	if (sc->sc_type >= WM_T_82544) {
   1859 		/* First we have to find the I/O BAR. */
   1860 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1861 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1862 			if (memtype == PCI_MAPREG_TYPE_IO)
   1863 				break;
   1864 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1865 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1866 				i += 4;	/* skip high bits, too */
   1867 		}
   1868 		if (i < PCI_MAPREG_END) {
   1869 			/*
    1870 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1871 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1872 			 * That's not a problem, because those newer chips
    1873 			 * don't have this bug.
    1874 			 *
    1875 			 * The i8254x apparently doesn't respond when the
    1876 			 * I/O BAR is 0, which looks as if it hasn't
    1877 			 * been configured.
   1878 			 */
   1879 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1880 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1881 				aprint_error_dev(sc->sc_dev,
   1882 				    "WARNING: I/O BAR at zero.\n");
   1883 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1884 					0, &sc->sc_iot, &sc->sc_ioh,
   1885 					NULL, &sc->sc_ios) == 0) {
   1886 				sc->sc_flags |= WM_F_IOH_VALID;
   1887 			} else
   1888 				aprint_error_dev(sc->sc_dev,
   1889 				    "WARNING: unable to map I/O space\n");
   1890 		}
   1891 
   1892 	}
   1893 
   1894 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1895 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1896 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1897 	if (sc->sc_type < WM_T_82542_2_1)
   1898 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1899 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1900 
   1901 	/* power up chip */
   1902 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1903 	    && error != EOPNOTSUPP) {
   1904 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1905 		return;
   1906 	}
   1907 
   1908 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1909 	/*
    1910 	 * To save interrupt resources, don't use MSI-X if we can use
    1911 	 * only one queue.
   1912 	 */
   1913 	if (sc->sc_nqueues > 1) {
   1914 		max_type = PCI_INTR_TYPE_MSIX;
   1915 		/*
    1916 		 *  The 82583 has an MSI-X capability in the PCI configuration
    1917 		 * space but the chip doesn't actually support it. At least the
    1918 		 * documentation doesn't say anything about MSI-X.
   1919 		 */
   1920 		counts[PCI_INTR_TYPE_MSIX]
   1921 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1922 	} else {
   1923 		max_type = PCI_INTR_TYPE_MSI;
   1924 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1925 	}
   1926 
   1927 	/* Allocation settings */
   1928 	counts[PCI_INTR_TYPE_MSI] = 1;
   1929 	counts[PCI_INTR_TYPE_INTX] = 1;
   1930 	/* overridden by disable flags */
   1931 	if (wm_disable_msi != 0) {
   1932 		counts[PCI_INTR_TYPE_MSI] = 0;
   1933 		if (wm_disable_msix != 0) {
   1934 			max_type = PCI_INTR_TYPE_INTX;
   1935 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 		}
   1937 	} else if (wm_disable_msix != 0) {
   1938 		max_type = PCI_INTR_TYPE_MSI;
   1939 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1940 	}
   1941 
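	/*
	 * The allocation below falls back MSI-X -> MSI -> INTx: if MSI-X
	 * setup fails, the vectors are released and we retry with MSI;
	 * if MSI setup fails, we retry once more with INTx.
	 */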
   1942 alloc_retry:
   1943 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1944 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1945 		return;
   1946 	}
   1947 
   1948 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1949 		error = wm_setup_msix(sc);
   1950 		if (error) {
   1951 			pci_intr_release(pc, sc->sc_intrs,
   1952 			    counts[PCI_INTR_TYPE_MSIX]);
   1953 
   1954 			/* Setup for MSI: Disable MSI-X */
   1955 			max_type = PCI_INTR_TYPE_MSI;
   1956 			counts[PCI_INTR_TYPE_MSI] = 1;
   1957 			counts[PCI_INTR_TYPE_INTX] = 1;
   1958 			goto alloc_retry;
   1959 		}
   1960 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1961 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1962 		error = wm_setup_legacy(sc);
   1963 		if (error) {
   1964 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1965 			    counts[PCI_INTR_TYPE_MSI]);
   1966 
   1967 			/* The next try is for INTx: Disable MSI */
   1968 			max_type = PCI_INTR_TYPE_INTX;
   1969 			counts[PCI_INTR_TYPE_INTX] = 1;
   1970 			goto alloc_retry;
   1971 		}
   1972 	} else {
   1973 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1974 		error = wm_setup_legacy(sc);
   1975 		if (error) {
   1976 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1977 			    counts[PCI_INTR_TYPE_INTX]);
   1978 			return;
   1979 		}
   1980 	}
   1981 
   1982 	/*
   1983 	 * Check the function ID (unit number of the chip).
   1984 	 */
   1985 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1986 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1987 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1988 	    || (sc->sc_type == WM_T_82580)
   1989 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1990 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1991 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1992 	else
   1993 		sc->sc_funcid = 0;
   1994 
   1995 	/*
   1996 	 * Determine a few things about the bus we're connected to.
   1997 	 */
   1998 	if (sc->sc_type < WM_T_82543) {
   1999 		/* We don't really know the bus characteristics here. */
   2000 		sc->sc_bus_speed = 33;
   2001 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2002 		/*
   2003 		 * CSA (Communication Streaming Architecture) is about as fast
    2004 		 * as a 32-bit 66MHz PCI bus.
   2005 		 */
   2006 		sc->sc_flags |= WM_F_CSA;
   2007 		sc->sc_bus_speed = 66;
   2008 		aprint_verbose_dev(sc->sc_dev,
   2009 		    "Communication Streaming Architecture\n");
   2010 		if (sc->sc_type == WM_T_82547) {
   2011 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2012 			callout_setfunc(&sc->sc_txfifo_ch,
   2013 			    wm_82547_txfifo_stall, sc);
   2014 			aprint_verbose_dev(sc->sc_dev,
   2015 			    "using 82547 Tx FIFO stall work-around\n");
   2016 		}
   2017 	} else if (sc->sc_type >= WM_T_82571) {
   2018 		sc->sc_flags |= WM_F_PCIE;
   2019 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2020 		    && (sc->sc_type != WM_T_ICH10)
   2021 		    && (sc->sc_type != WM_T_PCH)
   2022 		    && (sc->sc_type != WM_T_PCH2)
   2023 		    && (sc->sc_type != WM_T_PCH_LPT)
   2024 		    && (sc->sc_type != WM_T_PCH_SPT)
   2025 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2026 			/* ICH* and PCH* have no PCIe capability registers */
   2027 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2028 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2029 				NULL) == 0)
   2030 				aprint_error_dev(sc->sc_dev,
   2031 				    "unable to find PCIe capability\n");
   2032 		}
   2033 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2034 	} else {
   2035 		reg = CSR_READ(sc, WMREG_STATUS);
   2036 		if (reg & STATUS_BUS64)
   2037 			sc->sc_flags |= WM_F_BUS64;
   2038 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2039 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2040 
   2041 			sc->sc_flags |= WM_F_PCIX;
   2042 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2043 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2044 				aprint_error_dev(sc->sc_dev,
   2045 				    "unable to find PCIX capability\n");
   2046 			else if (sc->sc_type != WM_T_82545_3 &&
   2047 				 sc->sc_type != WM_T_82546_3) {
   2048 				/*
   2049 				 * Work around a problem caused by the BIOS
   2050 				 * setting the max memory read byte count
   2051 				 * incorrectly.
   2052 				 */
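				/*
				 * Worked example (hypothetical encodings):
				 * a bytecnt of 3 means 512 << 3 = 4096
				 * bytes; if maxb is 2 (2048 bytes), the
				 * command register is rewritten with 2.
				 */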
   2053 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2054 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2055 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2056 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2057 
   2058 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2059 				    PCIX_CMD_BYTECNT_SHIFT;
   2060 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2061 				    PCIX_STATUS_MAXB_SHIFT;
   2062 				if (bytecnt > maxb) {
   2063 					aprint_verbose_dev(sc->sc_dev,
   2064 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2065 					    512 << bytecnt, 512 << maxb);
   2066 					pcix_cmd = (pcix_cmd &
   2067 					    ~PCIX_CMD_BYTECNT_MASK) |
   2068 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2069 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2070 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2071 					    pcix_cmd);
   2072 				}
   2073 			}
   2074 		}
   2075 		/*
   2076 		 * The quad port adapter is special; it has a PCIX-PCIX
   2077 		 * bridge on the board, and can run the secondary bus at
   2078 		 * a higher speed.
   2079 		 */
   2080 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2081 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2082 								      : 66;
   2083 		} else if (sc->sc_flags & WM_F_PCIX) {
   2084 			switch (reg & STATUS_PCIXSPD_MASK) {
   2085 			case STATUS_PCIXSPD_50_66:
   2086 				sc->sc_bus_speed = 66;
   2087 				break;
   2088 			case STATUS_PCIXSPD_66_100:
   2089 				sc->sc_bus_speed = 100;
   2090 				break;
   2091 			case STATUS_PCIXSPD_100_133:
   2092 				sc->sc_bus_speed = 133;
   2093 				break;
   2094 			default:
   2095 				aprint_error_dev(sc->sc_dev,
   2096 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2097 				    reg & STATUS_PCIXSPD_MASK);
   2098 				sc->sc_bus_speed = 66;
   2099 				break;
   2100 			}
   2101 		} else
   2102 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2103 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2104 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2105 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2106 	}
   2107 
   2108 	/* clear interesting stat counters */
   2109 	CSR_READ(sc, WMREG_COLC);
   2110 	CSR_READ(sc, WMREG_RXERRC);
   2111 
   2112 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2113 	    || (sc->sc_type >= WM_T_ICH8))
   2114 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2115 	if (sc->sc_type >= WM_T_ICH8)
   2116 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2117 
   2118 	/* Set PHY, NVM mutex related stuff */
   2119 	switch (sc->sc_type) {
   2120 	case WM_T_82542_2_0:
   2121 	case WM_T_82542_2_1:
   2122 	case WM_T_82543:
   2123 	case WM_T_82544:
   2124 		/* Microwire */
   2125 		sc->nvm.read = wm_nvm_read_uwire;
   2126 		sc->sc_nvm_wordsize = 64;
   2127 		sc->sc_nvm_addrbits = 6;
   2128 		break;
   2129 	case WM_T_82540:
   2130 	case WM_T_82545:
   2131 	case WM_T_82545_3:
   2132 	case WM_T_82546:
   2133 	case WM_T_82546_3:
   2134 		/* Microwire */
   2135 		sc->nvm.read = wm_nvm_read_uwire;
   2136 		reg = CSR_READ(sc, WMREG_EECD);
   2137 		if (reg & EECD_EE_SIZE) {
   2138 			sc->sc_nvm_wordsize = 256;
   2139 			sc->sc_nvm_addrbits = 8;
   2140 		} else {
   2141 			sc->sc_nvm_wordsize = 64;
   2142 			sc->sc_nvm_addrbits = 6;
   2143 		}
   2144 		sc->sc_flags |= WM_F_LOCK_EECD;
   2145 		sc->nvm.acquire = wm_get_eecd;
   2146 		sc->nvm.release = wm_put_eecd;
   2147 		break;
   2148 	case WM_T_82541:
   2149 	case WM_T_82541_2:
   2150 	case WM_T_82547:
   2151 	case WM_T_82547_2:
   2152 		reg = CSR_READ(sc, WMREG_EECD);
   2153 		/*
    2154 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2155 		 * 8254[17], so set the flags and functions before calling it.
   2156 		 */
   2157 		sc->sc_flags |= WM_F_LOCK_EECD;
   2158 		sc->nvm.acquire = wm_get_eecd;
   2159 		sc->nvm.release = wm_put_eecd;
   2160 		if (reg & EECD_EE_TYPE) {
   2161 			/* SPI */
   2162 			sc->nvm.read = wm_nvm_read_spi;
   2163 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2164 			wm_nvm_set_addrbits_size_eecd(sc);
   2165 		} else {
   2166 			/* Microwire */
   2167 			sc->nvm.read = wm_nvm_read_uwire;
   2168 			if ((reg & EECD_EE_ABITS) != 0) {
   2169 				sc->sc_nvm_wordsize = 256;
   2170 				sc->sc_nvm_addrbits = 8;
   2171 			} else {
   2172 				sc->sc_nvm_wordsize = 64;
   2173 				sc->sc_nvm_addrbits = 6;
   2174 			}
   2175 		}
   2176 		break;
   2177 	case WM_T_82571:
   2178 	case WM_T_82572:
   2179 		/* SPI */
   2180 		sc->nvm.read = wm_nvm_read_eerd;
    2181 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2182 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2183 		wm_nvm_set_addrbits_size_eecd(sc);
   2184 		sc->phy.acquire = wm_get_swsm_semaphore;
   2185 		sc->phy.release = wm_put_swsm_semaphore;
   2186 		sc->nvm.acquire = wm_get_nvm_82571;
   2187 		sc->nvm.release = wm_put_nvm_82571;
   2188 		break;
   2189 	case WM_T_82573:
   2190 	case WM_T_82574:
   2191 	case WM_T_82583:
   2192 		sc->nvm.read = wm_nvm_read_eerd;
    2193 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2194 		if (sc->sc_type == WM_T_82573) {
   2195 			sc->phy.acquire = wm_get_swsm_semaphore;
   2196 			sc->phy.release = wm_put_swsm_semaphore;
   2197 			sc->nvm.acquire = wm_get_nvm_82571;
   2198 			sc->nvm.release = wm_put_nvm_82571;
   2199 		} else {
   2200 			/* Both PHY and NVM use the same semaphore. */
   2201 			sc->phy.acquire = sc->nvm.acquire
   2202 			    = wm_get_swfwhw_semaphore;
   2203 			sc->phy.release = sc->nvm.release
   2204 			    = wm_put_swfwhw_semaphore;
   2205 		}
   2206 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2207 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2208 			sc->sc_nvm_wordsize = 2048;
   2209 		} else {
   2210 			/* SPI */
   2211 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 			wm_nvm_set_addrbits_size_eecd(sc);
   2213 		}
   2214 		break;
   2215 	case WM_T_82575:
   2216 	case WM_T_82576:
   2217 	case WM_T_82580:
   2218 	case WM_T_I350:
   2219 	case WM_T_I354:
   2220 	case WM_T_80003:
   2221 		/* SPI */
   2222 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 		wm_nvm_set_addrbits_size_eecd(sc);
   2224 		if ((sc->sc_type == WM_T_80003)
   2225 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2226 			sc->nvm.read = wm_nvm_read_eerd;
   2227 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2228 		} else {
   2229 			sc->nvm.read = wm_nvm_read_spi;
   2230 			sc->sc_flags |= WM_F_LOCK_EECD;
   2231 		}
   2232 		sc->phy.acquire = wm_get_phy_82575;
   2233 		sc->phy.release = wm_put_phy_82575;
   2234 		sc->nvm.acquire = wm_get_nvm_80003;
   2235 		sc->nvm.release = wm_put_nvm_80003;
   2236 		break;
   2237 	case WM_T_ICH8:
   2238 	case WM_T_ICH9:
   2239 	case WM_T_ICH10:
   2240 	case WM_T_PCH:
   2241 	case WM_T_PCH2:
   2242 	case WM_T_PCH_LPT:
   2243 		sc->nvm.read = wm_nvm_read_ich8;
   2244 		/* FLASH */
   2245 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2246 		sc->sc_nvm_wordsize = 2048;
   2247 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2248 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2249 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2250 			aprint_error_dev(sc->sc_dev,
   2251 			    "can't map FLASH registers\n");
   2252 			goto out;
   2253 		}
   2254 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2255 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2256 		    ICH_FLASH_SECTOR_SIZE;
   2257 		sc->sc_ich8_flash_bank_size =
   2258 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2259 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2260 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2261 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
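		/*
		 * Worked example (hypothetical GFPREG value 0x001f0010,
		 * assuming the usual 4KB ICH_FLASH_SECTOR_SIZE): the flash
		 * base becomes 0x10 * 4096 = 0x10000, and the bank size is
		 * ((0x1f + 1) - 0x10) * 4096 / (2 * sizeof(uint16_t))
		 * = 0x4000.
		 */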
   2262 		sc->sc_flashreg_offset = 0;
   2263 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2264 		sc->phy.release = wm_put_swflag_ich8lan;
   2265 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2266 		sc->nvm.release = wm_put_nvm_ich8lan;
   2267 		break;
   2268 	case WM_T_PCH_SPT:
   2269 	case WM_T_PCH_CNP:
   2270 		sc->nvm.read = wm_nvm_read_spt;
   2271 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2272 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2273 		sc->sc_flasht = sc->sc_st;
   2274 		sc->sc_flashh = sc->sc_sh;
   2275 		sc->sc_ich8_flash_base = 0;
   2276 		sc->sc_nvm_wordsize =
   2277 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2278 		    * NVM_SIZE_MULTIPLIER;
    2279 		/* It is the size in bytes; we want words */
   2280 		sc->sc_nvm_wordsize /= 2;
   2281 		/* assume 2 banks */
   2282 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
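		/*
		 * Worked example (hypothetical STRAP field of 7, assuming
		 * NVM_SIZE_MULTIPLIER is 4096 bytes): (7 + 1) * 4096 =
		 * 32768 bytes = 16384 words, giving each of the two
		 * assumed banks 8192 words.
		 */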
   2283 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2284 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2285 		sc->phy.release = wm_put_swflag_ich8lan;
   2286 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2287 		sc->nvm.release = wm_put_nvm_ich8lan;
   2288 		break;
   2289 	case WM_T_I210:
   2290 	case WM_T_I211:
    2291 	/* Allow a single clear of the SW semaphore on I210 and newer */
   2292 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2293 		if (wm_nvm_flash_presence_i210(sc)) {
   2294 			sc->nvm.read = wm_nvm_read_eerd;
   2295 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2296 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2297 			wm_nvm_set_addrbits_size_eecd(sc);
   2298 		} else {
   2299 			sc->nvm.read = wm_nvm_read_invm;
   2300 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2301 			sc->sc_nvm_wordsize = INVM_SIZE;
   2302 		}
   2303 		sc->phy.acquire = wm_get_phy_82575;
   2304 		sc->phy.release = wm_put_phy_82575;
   2305 		sc->nvm.acquire = wm_get_nvm_80003;
   2306 		sc->nvm.release = wm_put_nvm_80003;
   2307 		break;
   2308 	default:
   2309 		break;
   2310 	}
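
	/*
	 * A minimal sketch (not compiled) of how the nvm function pointers
	 * chosen above are used; real callers go through wm_nvm_read(),
	 * which wraps the acquire/read/release sequence.
	 */
#if 0
	{
		uint16_t word;

		if (sc->nvm.acquire(sc) == 0) {
			sc->nvm.read(sc, NVM_OFF_CFG1, 1, &word);
			sc->nvm.release(sc);
		}
	}
#endif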
   2311 
   2312 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2313 	switch (sc->sc_type) {
   2314 	case WM_T_82571:
   2315 	case WM_T_82572:
   2316 		reg = CSR_READ(sc, WMREG_SWSM2);
   2317 		if ((reg & SWSM2_LOCK) == 0) {
   2318 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2319 			force_clear_smbi = true;
   2320 		} else
   2321 			force_clear_smbi = false;
   2322 		break;
   2323 	case WM_T_82573:
   2324 	case WM_T_82574:
   2325 	case WM_T_82583:
   2326 		force_clear_smbi = true;
   2327 		break;
   2328 	default:
   2329 		force_clear_smbi = false;
   2330 		break;
   2331 	}
   2332 	if (force_clear_smbi) {
   2333 		reg = CSR_READ(sc, WMREG_SWSM);
   2334 		if ((reg & SWSM_SMBI) != 0)
   2335 			aprint_error_dev(sc->sc_dev,
   2336 			    "Please update the Bootagent\n");
   2337 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2338 	}
   2339 
   2340 	/*
    2341 	 * Defer printing the EEPROM type until after verifying the checksum.
   2342 	 * This allows the EEPROM type to be printed correctly in the case
   2343 	 * that no EEPROM is attached.
   2344 	 */
   2345 	/*
   2346 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2347 	 * this for later, so we can fail future reads from the EEPROM.
   2348 	 */
   2349 	if (wm_nvm_validate_checksum(sc)) {
   2350 		/*
    2351 		 * Retry the read, because some PCI-e parts fail the
    2352 		 * first check due to the link being in a sleep state.
   2353 		 */
   2354 		if (wm_nvm_validate_checksum(sc))
   2355 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2356 	}
   2357 
   2358 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2359 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2360 	else {
   2361 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2362 		    sc->sc_nvm_wordsize);
   2363 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2364 			aprint_verbose("iNVM");
   2365 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2366 			aprint_verbose("FLASH(HW)");
   2367 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2368 			aprint_verbose("FLASH");
   2369 		else {
   2370 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2371 				eetype = "SPI";
   2372 			else
   2373 				eetype = "MicroWire";
   2374 			aprint_verbose("(%d address bits) %s EEPROM",
   2375 			    sc->sc_nvm_addrbits, eetype);
   2376 		}
   2377 	}
   2378 	wm_nvm_version(sc);
   2379 	aprint_verbose("\n");
   2380 
   2381 	/*
    2382 	 * XXX This is the first call of wm_gmii_setup_phytype; the result
    2383 	 * might be incorrect.
   2384 	 */
   2385 	wm_gmii_setup_phytype(sc, 0, 0);
   2386 
   2387 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2388 	switch (sc->sc_type) {
   2389 	case WM_T_ICH8:
   2390 	case WM_T_ICH9:
   2391 	case WM_T_ICH10:
   2392 	case WM_T_PCH:
   2393 	case WM_T_PCH2:
   2394 	case WM_T_PCH_LPT:
   2395 	case WM_T_PCH_SPT:
   2396 	case WM_T_PCH_CNP:
   2397 		apme_mask = WUC_APME;
   2398 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2399 		if ((eeprom_data & apme_mask) != 0)
   2400 			sc->sc_flags |= WM_F_WOL;
   2401 		break;
   2402 	default:
   2403 		break;
   2404 	}
   2405 
   2406 	/* Reset the chip to a known state. */
   2407 	wm_reset(sc);
   2408 
   2409 	/*
   2410 	 * Check for I21[01] PLL workaround.
   2411 	 *
   2412 	 * Three cases:
   2413 	 * a) Chip is I211.
   2414 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2415 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2416 	 */
   2417 	if (sc->sc_type == WM_T_I211)
   2418 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2419 	if (sc->sc_type == WM_T_I210) {
   2420 		if (!wm_nvm_flash_presence_i210(sc))
   2421 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2422 		else if ((sc->sc_nvm_ver_major < 3)
   2423 		    || ((sc->sc_nvm_ver_major == 3)
   2424 			&& (sc->sc_nvm_ver_minor < 25))) {
   2425 			aprint_verbose_dev(sc->sc_dev,
   2426 			    "ROM image version %d.%d is older than 3.25\n",
   2427 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2428 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2429 		}
   2430 	}
   2431 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2432 		wm_pll_workaround_i210(sc);
   2433 
   2434 	wm_get_wakeup(sc);
   2435 
   2436 	/* Non-AMT based hardware can now take control from firmware */
   2437 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2438 		wm_get_hw_control(sc);
   2439 
   2440 	/*
    2441 	 * Read the Ethernet address from the EEPROM if it wasn't
    2442 	 * found first in the device properties.
   2443 	 */
   2444 	ea = prop_dictionary_get(dict, "mac-address");
   2445 	if (ea != NULL) {
   2446 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2447 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2448 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2449 	} else {
   2450 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2451 			aprint_error_dev(sc->sc_dev,
   2452 			    "unable to read Ethernet address\n");
   2453 			goto out;
   2454 		}
   2455 	}
   2456 
   2457 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2458 	    ether_sprintf(enaddr));
   2459 
   2460 	/*
   2461 	 * Read the config info from the EEPROM, and set up various
   2462 	 * bits in the control registers based on their contents.
   2463 	 */
   2464 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2465 	if (pn != NULL) {
   2466 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2467 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2468 	} else {
   2469 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2470 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2471 			goto out;
   2472 		}
   2473 	}
   2474 
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	/* check for WM_F_WOL */
   2487 	switch (sc->sc_type) {
   2488 	case WM_T_82542_2_0:
   2489 	case WM_T_82542_2_1:
   2490 	case WM_T_82543:
   2491 		/* dummy? */
   2492 		eeprom_data = 0;
   2493 		apme_mask = NVM_CFG3_APME;
   2494 		break;
   2495 	case WM_T_82544:
   2496 		apme_mask = NVM_CFG2_82544_APM_EN;
   2497 		eeprom_data = cfg2;
   2498 		break;
   2499 	case WM_T_82546:
   2500 	case WM_T_82546_3:
   2501 	case WM_T_82571:
   2502 	case WM_T_82572:
   2503 	case WM_T_82573:
   2504 	case WM_T_82574:
   2505 	case WM_T_82583:
   2506 	case WM_T_80003:
   2507 	case WM_T_82575:
   2508 	case WM_T_82576:
   2509 		apme_mask = NVM_CFG3_APME;
   2510 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2511 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2512 		break;
   2513 	case WM_T_82580:
   2514 	case WM_T_I350:
   2515 	case WM_T_I354:
   2516 	case WM_T_I210:
   2517 	case WM_T_I211:
   2518 		apme_mask = NVM_CFG3_APME;
   2519 		wm_nvm_read(sc,
   2520 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2521 		    1, &eeprom_data);
   2522 		break;
   2523 	case WM_T_ICH8:
   2524 	case WM_T_ICH9:
   2525 	case WM_T_ICH10:
   2526 	case WM_T_PCH:
   2527 	case WM_T_PCH2:
   2528 	case WM_T_PCH_LPT:
   2529 	case WM_T_PCH_SPT:
   2530 	case WM_T_PCH_CNP:
    2531 		/* Already checked before wm_reset() */
   2532 		apme_mask = eeprom_data = 0;
   2533 		break;
   2534 	default: /* XXX 82540 */
   2535 		apme_mask = NVM_CFG3_APME;
   2536 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2537 		break;
   2538 	}
    2539 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2540 	if ((eeprom_data & apme_mask) != 0)
   2541 		sc->sc_flags |= WM_F_WOL;
   2542 
   2543 	/*
   2544 	 * We have the eeprom settings, now apply the special cases
   2545 	 * where the eeprom may be wrong or the board won't support
    2546 	 * wake on lan on a particular port.
   2547 	 */
   2548 	switch (sc->sc_pcidevid) {
   2549 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2550 		sc->sc_flags &= ~WM_F_WOL;
   2551 		break;
   2552 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2553 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2554 		/* Wake events only supported on port A for dual fiber
   2555 		 * regardless of eeprom setting */
   2556 		if (sc->sc_funcid == 1)
   2557 			sc->sc_flags &= ~WM_F_WOL;
   2558 		break;
   2559 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2560 		/* if quad port adapter, disable WoL on all but port A */
   2561 		if (sc->sc_funcid != 0)
   2562 			sc->sc_flags &= ~WM_F_WOL;
   2563 		break;
   2564 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2565 		/* Wake events only supported on port A for dual fiber
   2566 		 * regardless of eeprom setting */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2571 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2572 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2573 		/* if quad port adapter, disable WoL on all but port A */
   2574 		if (sc->sc_funcid != 0)
   2575 			sc->sc_flags &= ~WM_F_WOL;
   2576 		break;
   2577 	}
   2578 
   2579 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2580 		/* Check NVM for autonegotiation */
   2581 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2582 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2583 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2584 		}
   2585 	}
   2586 
   2587 	/*
    2588 	 * XXX need special handling for some multiple-port cards
    2589 	 * to disable a particular port.
   2590 	 */
   2591 
   2592 	if (sc->sc_type >= WM_T_82544) {
   2593 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2594 		if (pn != NULL) {
   2595 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2596 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2597 		} else {
   2598 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2599 				aprint_error_dev(sc->sc_dev,
   2600 				    "unable to read SWDPIN\n");
   2601 				goto out;
   2602 			}
   2603 		}
   2604 	}
   2605 
   2606 	if (cfg1 & NVM_CFG1_ILOS)
   2607 		sc->sc_ctrl |= CTRL_ILOS;
   2608 
   2609 	/*
   2610 	 * XXX
    2611 	 * This code isn't correct because pins 2 and 3 are located
    2612 	 * at different positions on newer chips. Check all datasheets.
    2613 	 *
    2614 	 * Until this is resolved, only do this on the 82580 and older.
   2615 	 */
   2616 	if (sc->sc_type <= WM_T_82580) {
   2617 		if (sc->sc_type >= WM_T_82544) {
   2618 			sc->sc_ctrl |=
   2619 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2620 			    CTRL_SWDPIO_SHIFT;
   2621 			sc->sc_ctrl |=
   2622 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2623 			    CTRL_SWDPINS_SHIFT;
   2624 		} else {
   2625 			sc->sc_ctrl |=
   2626 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2627 			    CTRL_SWDPIO_SHIFT;
   2628 		}
   2629 	}
   2630 
   2631 	/* XXX For other than 82580? */
   2632 	if (sc->sc_type == WM_T_82580) {
   2633 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2634 		if (nvmword & __BIT(13))
   2635 			sc->sc_ctrl |= CTRL_ILOS;
   2636 	}
   2637 
   2638 #if 0
   2639 	if (sc->sc_type >= WM_T_82544) {
   2640 		if (cfg1 & NVM_CFG1_IPS0)
   2641 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2642 		if (cfg1 & NVM_CFG1_IPS1)
   2643 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2644 		sc->sc_ctrl_ext |=
   2645 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2646 		    CTRL_EXT_SWDPIO_SHIFT;
   2647 		sc->sc_ctrl_ext |=
   2648 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2649 		    CTRL_EXT_SWDPINS_SHIFT;
   2650 	} else {
   2651 		sc->sc_ctrl_ext |=
   2652 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2653 		    CTRL_EXT_SWDPIO_SHIFT;
   2654 	}
   2655 #endif
   2656 
   2657 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2658 #if 0
   2659 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2660 #endif
   2661 
   2662 	if (sc->sc_type == WM_T_PCH) {
   2663 		uint16_t val;
   2664 
   2665 		/* Save the NVM K1 bit setting */
   2666 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2667 
   2668 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2669 			sc->sc_nvm_k1_enabled = 1;
   2670 		else
   2671 			sc->sc_nvm_k1_enabled = 0;
   2672 	}
   2673 
   2674 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2675 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2676 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2677 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2678 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2679 	    || sc->sc_type == WM_T_82573
   2680 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2681 		/* Copper only */
   2682 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2683 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2684 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2685 	    || (sc->sc_type == WM_T_I211)) {
   2686 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2687 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2688 		switch (link_mode) {
   2689 		case CTRL_EXT_LINK_MODE_1000KX:
   2690 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2691 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2692 			break;
   2693 		case CTRL_EXT_LINK_MODE_SGMII:
   2694 			if (wm_sgmii_uses_mdio(sc)) {
   2695 				aprint_verbose_dev(sc->sc_dev,
   2696 				    "SGMII(MDIO)\n");
   2697 				sc->sc_flags |= WM_F_SGMII;
   2698 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2699 				break;
   2700 			}
   2701 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2702 			/*FALLTHROUGH*/
   2703 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2704 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2705 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2706 				if (link_mode
   2707 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2708 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2709 					sc->sc_flags |= WM_F_SGMII;
   2710 				} else {
   2711 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2712 					aprint_verbose_dev(sc->sc_dev,
   2713 					    "SERDES\n");
   2714 				}
   2715 				break;
   2716 			}
   2717 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2718 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2719 
   2720 			/* Change current link mode setting */
   2721 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2722 			switch (sc->sc_mediatype) {
   2723 			case WM_MEDIATYPE_COPPER:
   2724 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2725 				break;
   2726 			case WM_MEDIATYPE_SERDES:
   2727 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2728 				break;
   2729 			default:
   2730 				break;
   2731 			}
   2732 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2733 			break;
   2734 		case CTRL_EXT_LINK_MODE_GMII:
   2735 		default:
   2736 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2737 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 			break;
   2739 		}
   2740 
   2742 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2743 			reg |= CTRL_EXT_I2C_ENA;
   2744 		else
   2745 			reg &= ~CTRL_EXT_I2C_ENA;
   2746 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2747 	} else if (sc->sc_type < WM_T_82543 ||
   2748 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2749 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2750 			aprint_error_dev(sc->sc_dev,
   2751 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2752 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2753 		}
   2754 	} else {
   2755 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2756 			aprint_error_dev(sc->sc_dev,
   2757 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2758 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2759 		}
   2760 	}
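         	/*
         	 * snprintb(3) renders sc_flags symbolically using the
         	 * WM_FLAGS format string, e.g. (illustrative output)
         	 * "0x4000<HAS_MII>".
         	 */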
   2761 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2762 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2763 
   2764 	/* Set device properties (macflags) */
   2765 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2766 
   2767 	/* Initialize the media structures accordingly. */
   2768 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2769 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2770 	else
   2771 		wm_tbi_mediainit(sc); /* All others */
   2772 
   2773 	ifp = &sc->sc_ethercom.ec_if;
   2774 	xname = device_xname(sc->sc_dev);
   2775 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2776 	ifp->if_softc = sc;
   2777 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2778 #ifdef WM_MPSAFE
   2779 	ifp->if_extflags = IFEF_MPSAFE;
   2780 #endif
   2781 	ifp->if_ioctl = wm_ioctl;
   2782 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2783 		ifp->if_start = wm_nq_start;
   2784 		/*
    2785 		 * When the number of CPUs is one and the controller can use
    2786 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2787 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2788 		 * other for link status changes.  In this situation,
    2789 		 * wm_nq_transmit() is disadvantageous because of the
    2790 		 * wm_select_txqueue() and pcq(9) overhead.
   2791 		 */
   2792 		if (wm_is_using_multiqueue(sc))
   2793 			ifp->if_transmit = wm_nq_transmit;
   2794 	} else {
   2795 		ifp->if_start = wm_start;
   2796 		/*
    2797 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2798 		 */
   2799 		if (wm_is_using_multiqueue(sc))
   2800 			ifp->if_transmit = wm_transmit;
   2801 	}
    2802 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2803 	ifp->if_init = wm_init;
   2804 	ifp->if_stop = wm_stop;
   2805 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2806 	IFQ_SET_READY(&ifp->if_snd);
   2807 
   2808 	/* Check for jumbo frame */
   2809 	switch (sc->sc_type) {
   2810 	case WM_T_82573:
   2811 		/* XXX limited to 9234 if ASPM is disabled */
   2812 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2813 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2814 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2815 		break;
   2816 	case WM_T_82571:
   2817 	case WM_T_82572:
   2818 	case WM_T_82574:
   2819 	case WM_T_82583:
   2820 	case WM_T_82575:
   2821 	case WM_T_82576:
   2822 	case WM_T_82580:
   2823 	case WM_T_I350:
   2824 	case WM_T_I354:
   2825 	case WM_T_I210:
   2826 	case WM_T_I211:
   2827 	case WM_T_80003:
   2828 	case WM_T_ICH9:
   2829 	case WM_T_ICH10:
   2830 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2831 	case WM_T_PCH_LPT:
   2832 	case WM_T_PCH_SPT:
   2833 	case WM_T_PCH_CNP:
   2834 		/* XXX limited to 9234 */
   2835 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_PCH:
   2838 		/* XXX limited to 4096 */
   2839 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2840 		break;
   2841 	case WM_T_82542_2_0:
   2842 	case WM_T_82542_2_1:
   2843 	case WM_T_ICH8:
   2844 		/* No support for jumbo frame */
   2845 		break;
   2846 	default:
   2847 		/* ETHER_MAX_LEN_JUMBO */
   2848 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2849 		break;
   2850 	}
   2851 
    2852 	/* If we're an i82543 or greater, we can support VLANs. */
   2853 	if (sc->sc_type >= WM_T_82543)
   2854 		sc->sc_ethercom.ec_capabilities |=
   2855 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2856 
   2857 	/*
    2858 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2859 	 * on i82543 and later.
   2860 	 */
   2861 	if (sc->sc_type >= WM_T_82543) {
   2862 		ifp->if_capabilities |=
   2863 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2864 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2865 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2866 		    IFCAP_CSUM_TCPv6_Tx |
   2867 		    IFCAP_CSUM_UDPv6_Tx;
   2868 	}
   2869 
   2870 	/*
   2871 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2872 	 *
   2873 	 *	82541GI (8086:1076) ... no
   2874 	 *	82572EI (8086:10b9) ... yes
   2875 	 */
   2876 	if (sc->sc_type >= WM_T_82571) {
   2877 		ifp->if_capabilities |=
   2878 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2879 	}
   2880 
   2881 	/*
    2882 	 * If we're an i82544 or greater (except i82547), we can do
   2883 	 * TCP segmentation offload.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2886 		ifp->if_capabilities |= IFCAP_TSOv4;
   2887 	}
   2888 
   2889 	if (sc->sc_type >= WM_T_82571) {
   2890 		ifp->if_capabilities |= IFCAP_TSOv6;
   2891 	}
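         	/*
         	 * These capabilities are toggled from userland with
         	 * ifconfig(8), e.g. (usage sketch) "ifconfig wm0 tcp4csum"
         	 * or "ifconfig wm0 tso4".
         	 */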
   2892 
   2893 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2894 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2895 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2896 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2897 
   2898 #ifdef WM_MPSAFE
   2899 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2900 #else
   2901 	sc->sc_core_lock = NULL;
   2902 #endif
   2903 
   2904 	/* Attach the interface. */
   2905 	error = if_initialize(ifp);
   2906 	if (error != 0) {
   2907 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2908 		    error);
   2909 		return; /* Error */
   2910 	}
   2911 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2912 	ether_ifattach(ifp, enaddr);
   2913 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2914 	if_register(ifp);
   2915 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2916 	    RND_FLAG_DEFAULT);
   2917 
   2918 #ifdef WM_EVENT_COUNTERS
   2919 	/* Attach event counters. */
   2920 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2921 	    NULL, xname, "linkintr");
   2922 
   2923 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2924 	    NULL, xname, "tx_xoff");
   2925 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2926 	    NULL, xname, "tx_xon");
   2927 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2928 	    NULL, xname, "rx_xoff");
   2929 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2930 	    NULL, xname, "rx_xon");
   2931 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2932 	    NULL, xname, "rx_macctl");
   2933 #endif /* WM_EVENT_COUNTERS */
   2934 
   2935 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2936 		pmf_class_network_register(self, ifp);
   2937 	else
   2938 		aprint_error_dev(self, "couldn't establish power handler\n");
   2939 
   2940 	sc->sc_flags |= WM_F_ATTACHED;
   2941 out:
   2942 	return;
   2943 }
   2944 
   2945 /* The detach function (ca_detach) */
   2946 static int
   2947 wm_detach(device_t self, int flags __unused)
   2948 {
   2949 	struct wm_softc *sc = device_private(self);
   2950 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2951 	int i;
   2952 
   2953 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2954 		return 0;
   2955 
    2956 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   2957 	wm_stop(ifp, 1);
   2958 
   2959 	pmf_device_deregister(self);
   2960 
   2961 #ifdef WM_EVENT_COUNTERS
   2962 	evcnt_detach(&sc->sc_ev_linkintr);
   2963 
   2964 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2965 	evcnt_detach(&sc->sc_ev_tx_xon);
   2966 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2967 	evcnt_detach(&sc->sc_ev_rx_xon);
   2968 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2969 #endif /* WM_EVENT_COUNTERS */
   2970 
   2971 	/* Tell the firmware about the release */
   2972 	WM_CORE_LOCK(sc);
   2973 	wm_release_manageability(sc);
   2974 	wm_release_hw_control(sc);
   2975 	wm_enable_wakeup(sc);
   2976 	WM_CORE_UNLOCK(sc);
   2977 
   2978 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2979 
   2980 	/* Delete all remaining media. */
   2981 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2982 
   2983 	ether_ifdetach(ifp);
   2984 	if_detach(ifp);
   2985 	if_percpuq_destroy(sc->sc_ipq);
   2986 
   2987 	/* Unload RX dmamaps and free mbufs */
   2988 	for (i = 0; i < sc->sc_nqueues; i++) {
   2989 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2990 		mutex_enter(rxq->rxq_lock);
   2991 		wm_rxdrain(rxq);
   2992 		mutex_exit(rxq->rxq_lock);
   2993 	}
   2994 	/* Must unlock here */
   2995 
   2996 	/* Disestablish the interrupt handler */
   2997 	for (i = 0; i < sc->sc_nintrs; i++) {
   2998 		if (sc->sc_ihs[i] != NULL) {
   2999 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3000 			sc->sc_ihs[i] = NULL;
   3001 		}
   3002 	}
   3003 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3004 
   3005 	wm_free_txrx_queues(sc);
   3006 
   3007 	/* Unmap the registers */
   3008 	if (sc->sc_ss) {
   3009 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3010 		sc->sc_ss = 0;
   3011 	}
   3012 	if (sc->sc_ios) {
   3013 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3014 		sc->sc_ios = 0;
   3015 	}
   3016 	if (sc->sc_flashs) {
   3017 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3018 		sc->sc_flashs = 0;
   3019 	}
   3020 
   3021 	if (sc->sc_core_lock)
   3022 		mutex_obj_free(sc->sc_core_lock);
   3023 	if (sc->sc_ich_phymtx)
   3024 		mutex_obj_free(sc->sc_ich_phymtx);
   3025 	if (sc->sc_ich_nvmmtx)
   3026 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3027 
   3028 	return 0;
   3029 }
   3030 
   3031 static bool
   3032 wm_suspend(device_t self, const pmf_qual_t *qual)
   3033 {
   3034 	struct wm_softc *sc = device_private(self);
   3035 
   3036 	wm_release_manageability(sc);
   3037 	wm_release_hw_control(sc);
   3038 	wm_enable_wakeup(sc);
   3039 
   3040 	return true;
   3041 }
   3042 
   3043 static bool
   3044 wm_resume(device_t self, const pmf_qual_t *qual)
   3045 {
   3046 	struct wm_softc *sc = device_private(self);
   3047 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3048 	pcireg_t reg;
   3049 	char buf[256];
   3050 
   3051 	reg = CSR_READ(sc, WMREG_WUS);
   3052 	if (reg != 0) {
   3053 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3054 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3055 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3056 	}
   3057 
   3058 	if (sc->sc_type >= WM_T_PCH2)
   3059 		wm_resume_workarounds_pchlan(sc);
   3060 	if ((ifp->if_flags & IFF_UP) == 0) {
   3061 		wm_reset(sc);
   3062 		/* Non-AMT based hardware can now take control from firmware */
   3063 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3064 			wm_get_hw_control(sc);
   3065 		wm_init_manageability(sc);
   3066 	} else {
   3067 		/*
   3068 		 * We called pmf_class_network_register(), so if_init() is
   3069 		 * automatically called when IFF_UP. wm_reset(),
   3070 		 * wm_get_hw_control() and wm_init_manageability() are called
   3071 		 * via wm_init().
   3072 		 */
   3073 	}
   3074 
   3075 	return true;
   3076 }
   3077 
   3078 /*
    3079  * wm_watchdog:
    3080  *
    3081  *	Watchdog timer handler, called from wm_tick().
   3082  */
   3083 static void
   3084 wm_watchdog(struct ifnet *ifp)
   3085 {
   3086 	int qid;
   3087 	struct wm_softc *sc = ifp->if_softc;
    3088 	uint16_t hang_queue = 0; /* Max number of wm(4) queues is 16 (82576). */
   3089 
   3090 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3091 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3092 
   3093 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3094 	}
   3095 
   3096 	/*
    3097 	 * If any of the queues hung up, reset the interface.
   3098 	 */
   3099 	if (hang_queue != 0) {
   3100 		(void) wm_init(ifp);
   3101 
   3102 		/*
    3103 		 * Some upper layer processing still calls ifp->if_start()
    3104 		 * directly, e.g. ALTQ or single-CPU systems.
   3105 		 */
   3106 		/* Try to get more packets going. */
   3107 		ifp->if_start(ifp);
   3108 	}
   3109 }
   3110 
   3111 
   3112 static void
   3113 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3114 {
   3115 
   3116 	mutex_enter(txq->txq_lock);
   3117 	if (txq->txq_sending &&
   3118 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3119 		wm_watchdog_txq_locked(ifp, txq, hang);
   3120 	}
   3121 	mutex_exit(txq->txq_lock);
   3122 }
   3123 
   3124 static void
   3125 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3126     uint16_t *hang)
   3127 {
   3128 	struct wm_softc *sc = ifp->if_softc;
   3129 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3130 
   3131 	KASSERT(mutex_owned(txq->txq_lock));
   3132 
   3133 	/*
   3134 	 * Since we're using delayed interrupts, sweep up
   3135 	 * before we report an error.
   3136 	 */
   3137 	wm_txeof(txq, UINT_MAX);
   3138 
   3139 	if (txq->txq_sending)
   3140 		*hang |= __BIT(wmq->wmq_id);
   3141 
   3142 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3143 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3144 		    device_xname(sc->sc_dev));
   3145 	} else {
   3146 #ifdef WM_DEBUG
   3147 		int i, j;
   3148 		struct wm_txsoft *txs;
   3149 #endif
   3150 		log(LOG_ERR,
   3151 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3152 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3153 		    txq->txq_next);
   3154 		ifp->if_oerrors++;
   3155 #ifdef WM_DEBUG
   3156 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3157 		    i = WM_NEXTTXS(txq, i)) {
   3158 		    txs = &txq->txq_soft[i];
   3159 		    printf("txs %d tx %d -> %d\n",
   3160 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3161 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3162 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3163 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3164 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3165 				    printf("\t %#08x%08x\n",
   3166 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3167 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3168 			    } else {
   3169 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3170 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3171 					txq->txq_descs[j].wtx_addr.wa_low);
   3172 				    printf("\t %#04x%02x%02x%08x\n",
   3173 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3174 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3175 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3176 					txq->txq_descs[j].wtx_cmdlen);
   3177 			    }
   3178 			if (j == txs->txs_lastdesc)
   3179 				break;
   3180 			}
   3181 		}
   3182 #endif
   3183 	}
   3184 }
   3185 
   3186 /*
   3187  * wm_tick:
   3188  *
   3189  *	One second timer, used to check link status, sweep up
   3190  *	completed transmit jobs, etc.
   3191  */
   3192 static void
   3193 wm_tick(void *arg)
   3194 {
   3195 	struct wm_softc *sc = arg;
   3196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3197 #ifndef WM_MPSAFE
   3198 	int s = splnet();
   3199 #endif
   3200 
   3201 	WM_CORE_LOCK(sc);
   3202 
   3203 	if (sc->sc_core_stopping) {
   3204 		WM_CORE_UNLOCK(sc);
   3205 #ifndef WM_MPSAFE
   3206 		splx(s);
   3207 #endif
   3208 		return;
   3209 	}
   3210 
   3211 	if (sc->sc_type >= WM_T_82542_2_1) {
   3212 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3213 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3214 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3215 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3216 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3217 	}
   3218 
   3219 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3220 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3221 	    + CSR_READ(sc, WMREG_CRCERRS)
   3222 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3223 	    + CSR_READ(sc, WMREG_SYMERRC)
   3224 	    + CSR_READ(sc, WMREG_RXERRC)
   3225 	    + CSR_READ(sc, WMREG_SEC)
   3226 	    + CSR_READ(sc, WMREG_CEXTERR)
   3227 	    + CSR_READ(sc, WMREG_RLEC);
   3228 	/*
    3229 	 * WMREG_RNBC is incremented when there are no available buffers in
    3230 	 * host memory. It is not the number of dropped packets, because the
    3231 	 * ethernet controller can still receive packets in that case as long
    3232 	 * as there is space in the PHY's FIFO.
    3233 	 *
    3234 	 * To count WMREG_RNBC, use a dedicated EVCNT instead of
    3235 	 * if_iqdrops.
   3236 	 */
   3237 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3238 
   3239 	if (sc->sc_flags & WM_F_HAS_MII)
   3240 		mii_tick(&sc->sc_mii);
   3241 	else if ((sc->sc_type >= WM_T_82575)
   3242 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3243 		wm_serdes_tick(sc);
   3244 	else
   3245 		wm_tbi_tick(sc);
   3246 
   3247 	WM_CORE_UNLOCK(sc);
   3248 
   3249 	wm_watchdog(ifp);
   3250 
   3251 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3252 }
   3253 
   3254 static int
   3255 wm_ifflags_cb(struct ethercom *ec)
   3256 {
   3257 	struct ifnet *ifp = &ec->ec_if;
   3258 	struct wm_softc *sc = ifp->if_softc;
   3259 	int rc = 0;
   3260 
   3261 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3262 		device_xname(sc->sc_dev), __func__));
   3263 
   3264 	WM_CORE_LOCK(sc);
   3265 
   3266 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3267 	sc->sc_if_flags = ifp->if_flags;
   3268 
   3269 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3270 		rc = ENETRESET;
   3271 		goto out;
   3272 	}
   3273 
   3274 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3275 		wm_set_filter(sc);
   3276 
   3277 	wm_set_vlan(sc);
   3278 
   3279 out:
   3280 	WM_CORE_UNLOCK(sc);
   3281 
   3282 	return rc;
   3283 }
   3284 
   3285 /*
   3286  * wm_ioctl:		[ifnet interface function]
   3287  *
   3288  *	Handle control requests from the operator.
   3289  */
   3290 static int
   3291 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3292 {
   3293 	struct wm_softc *sc = ifp->if_softc;
   3294 	struct ifreq *ifr = (struct ifreq *) data;
   3295 	struct ifaddr *ifa = (struct ifaddr *)data;
   3296 	struct sockaddr_dl *sdl;
   3297 	int s, error;
   3298 
   3299 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3300 		device_xname(sc->sc_dev), __func__));
   3301 
   3302 #ifndef WM_MPSAFE
   3303 	s = splnet();
   3304 #endif
   3305 	switch (cmd) {
   3306 	case SIOCSIFMEDIA:
   3307 	case SIOCGIFMEDIA:
   3308 		WM_CORE_LOCK(sc);
   3309 		/* Flow control requires full-duplex mode. */
   3310 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3311 		    (ifr->ifr_media & IFM_FDX) == 0)
   3312 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3313 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3314 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3315 				/* We can do both TXPAUSE and RXPAUSE. */
   3316 				ifr->ifr_media |=
   3317 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3318 			}
   3319 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3320 		}
   3321 		WM_CORE_UNLOCK(sc);
   3322 #ifdef WM_MPSAFE
   3323 		s = splnet();
   3324 #endif
   3325 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3326 #ifdef WM_MPSAFE
   3327 		splx(s);
   3328 #endif
   3329 		break;
   3330 	case SIOCINITIFADDR:
   3331 		WM_CORE_LOCK(sc);
   3332 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3333 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3334 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3335 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3336 			/* unicast address is first multicast entry */
   3337 			wm_set_filter(sc);
   3338 			error = 0;
   3339 			WM_CORE_UNLOCK(sc);
   3340 			break;
   3341 		}
   3342 		WM_CORE_UNLOCK(sc);
   3343 		/*FALLTHROUGH*/
   3344 	default:
   3345 #ifdef WM_MPSAFE
   3346 		s = splnet();
   3347 #endif
   3348 		/* It may call wm_start, so unlock here */
   3349 		error = ether_ioctl(ifp, cmd, data);
   3350 #ifdef WM_MPSAFE
   3351 		splx(s);
   3352 #endif
   3353 		if (error != ENETRESET)
   3354 			break;
   3355 
   3356 		error = 0;
   3357 
   3358 		if (cmd == SIOCSIFCAP)
   3359 			error = (*ifp->if_init)(ifp);
   3360 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3361 			;
   3362 		else if (ifp->if_flags & IFF_RUNNING) {
   3363 			/*
   3364 			 * Multicast list has changed; set the hardware filter
   3365 			 * accordingly.
   3366 			 */
   3367 			WM_CORE_LOCK(sc);
   3368 			wm_set_filter(sc);
   3369 			WM_CORE_UNLOCK(sc);
   3370 		}
   3371 		break;
   3372 	}
   3373 
   3374 #ifndef WM_MPSAFE
   3375 	splx(s);
   3376 #endif
   3377 	return error;
   3378 }
   3379 
   3380 /* MAC address related */
   3381 
   3382 /*
    3383  * Get the offset of the MAC address and return it.
    3384  * If an error occurs, use offset 0.
   3385  */
   3386 static uint16_t
   3387 wm_check_alt_mac_addr(struct wm_softc *sc)
   3388 {
   3389 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3390 	uint16_t offset = NVM_OFF_MACADDR;
   3391 
   3392 	/* Try to read alternative MAC address pointer */
   3393 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3394 		return 0;
   3395 
    3396 	/* Check whether the pointer is valid or not. */
   3397 	if ((offset == 0x0000) || (offset == 0xffff))
   3398 		return 0;
   3399 
   3400 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3401 	/*
    3402 	 * Check whether the alternative MAC address is valid or not.
    3403 	 * Some cards have a non-0xffff pointer but don't actually use
    3404 	 * an alternative MAC address.
    3405 	 *
    3406 	 * A valid address must have the multicast (broadcast) bit clear.
   3407 	 */
   3408 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3409 		if (((myea[0] & 0xff) & 0x01) == 0)
   3410 			return offset; /* Found */
   3411 
   3412 	/* Not found */
   3413 	return 0;
   3414 }
   3415 
   3416 static int
   3417 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3418 {
   3419 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3420 	uint16_t offset = NVM_OFF_MACADDR;
   3421 	int do_invert = 0;
   3422 
   3423 	switch (sc->sc_type) {
   3424 	case WM_T_82580:
   3425 	case WM_T_I350:
   3426 	case WM_T_I354:
   3427 		/* EEPROM Top Level Partitioning */
   3428 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3429 		break;
   3430 	case WM_T_82571:
   3431 	case WM_T_82575:
   3432 	case WM_T_82576:
   3433 	case WM_T_80003:
   3434 	case WM_T_I210:
   3435 	case WM_T_I211:
   3436 		offset = wm_check_alt_mac_addr(sc);
   3437 		if (offset == 0)
   3438 			if ((sc->sc_funcid & 0x01) == 1)
   3439 				do_invert = 1;
   3440 		break;
   3441 	default:
   3442 		if ((sc->sc_funcid & 0x01) == 1)
   3443 			do_invert = 1;
   3444 		break;
   3445 	}
   3446 
   3447 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3448 		goto bad;
   3449 
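         	/*
         	 * NVM words are little-endian.  For example (illustrative):
         	 * if the three words read 0x1100, 0x3322 and 0x5544, the
         	 * resulting station address is 00:11:22:33:44:55.
         	 */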
   3450 	enaddr[0] = myea[0] & 0xff;
   3451 	enaddr[1] = myea[0] >> 8;
   3452 	enaddr[2] = myea[1] & 0xff;
   3453 	enaddr[3] = myea[1] >> 8;
   3454 	enaddr[4] = myea[2] & 0xff;
   3455 	enaddr[5] = myea[2] >> 8;
   3456 
   3457 	/*
   3458 	 * Toggle the LSB of the MAC address on the second port
   3459 	 * of some dual port cards.
   3460 	 */
   3461 	if (do_invert != 0)
   3462 		enaddr[5] ^= 1;
   3463 
   3464 	return 0;
   3465 
   3466  bad:
   3467 	return -1;
   3468 }
   3469 
   3470 /*
   3471  * wm_set_ral:
   3472  *
    3473  *	Set an entry in the receive address list.
   3474  */
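         /*
          * For example (illustrative): the station address
          * 00:11:22:33:44:55 is programmed as ral_lo = 0x33221100 and
          * ral_hi = RAL_AV | 0x5544, i.e. the bytes are packed
          * little-endian with the Address Valid bit set in the high
          * register.
          */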
   3475 static void
   3476 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3477 {
   3478 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3479 	uint32_t wlock_mac;
   3480 	int rv;
   3481 
   3482 	if (enaddr != NULL) {
   3483 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3484 		    (enaddr[3] << 24);
   3485 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3486 		ral_hi |= RAL_AV;
   3487 	} else {
   3488 		ral_lo = 0;
   3489 		ral_hi = 0;
   3490 	}
   3491 
   3492 	switch (sc->sc_type) {
   3493 	case WM_T_82542_2_0:
   3494 	case WM_T_82542_2_1:
   3495 	case WM_T_82543:
   3496 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3497 		CSR_WRITE_FLUSH(sc);
   3498 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3499 		CSR_WRITE_FLUSH(sc);
   3500 		break;
   3501 	case WM_T_PCH2:
   3502 	case WM_T_PCH_LPT:
   3503 	case WM_T_PCH_SPT:
   3504 	case WM_T_PCH_CNP:
   3505 		if (idx == 0) {
   3506 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3507 			CSR_WRITE_FLUSH(sc);
   3508 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3509 			CSR_WRITE_FLUSH(sc);
   3510 			return;
   3511 		}
   3512 		if (sc->sc_type != WM_T_PCH2) {
   3513 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3514 			    FWSM_WLOCK_MAC);
   3515 			addrl = WMREG_SHRAL(idx - 1);
   3516 			addrh = WMREG_SHRAH(idx - 1);
   3517 		} else {
   3518 			wlock_mac = 0;
   3519 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3520 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3521 		}
   3522 
   3523 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3524 			rv = wm_get_swflag_ich8lan(sc);
   3525 			if (rv != 0)
   3526 				return;
   3527 			CSR_WRITE(sc, addrl, ral_lo);
   3528 			CSR_WRITE_FLUSH(sc);
   3529 			CSR_WRITE(sc, addrh, ral_hi);
   3530 			CSR_WRITE_FLUSH(sc);
   3531 			wm_put_swflag_ich8lan(sc);
   3532 		}
   3533 
   3534 		break;
   3535 	default:
   3536 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3537 		CSR_WRITE_FLUSH(sc);
   3538 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3539 		CSR_WRITE_FLUSH(sc);
   3540 		break;
   3541 	}
   3542 }
   3543 
   3544 /*
   3545  * wm_mchash:
   3546  *
    3547  *	Compute the hash of the multicast address for the 4096-bit
    3548  *	multicast filter (1024-bit on ICH/PCH variants).
   3549  */
   3550 static uint32_t
   3551 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3552 {
   3553 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3554 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3555 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3556 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3557 	uint32_t hash;
   3558 
   3559 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3560 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3561 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3562 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3563 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3564 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3565 		return (hash & 0x3ff);
   3566 	}
   3567 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3568 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3569 
   3570 	return (hash & 0xfff);
   3571 }
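         /*
          * Worked example (illustrative, derived from the code above): with
          * sc_mchash_type == 0 on a non-ICH/PCH chip, the multicast address
          * 01:00:5e:00:00:01 gives hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
          * so wm_set_filter() below sets bit (hash & 0x1f) = 0x10 in MTA
          * register (hash >> 5) = 0.
          */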
   3572 
   3573 /*
    3574  * wm_rar_count:
    3575  *	Return the number of usable receive address (RAL/RAR) entries.
   3576  */
   3577 static int
   3578 wm_rar_count(struct wm_softc *sc)
   3579 {
   3580 	int size;
   3581 
   3582 	switch (sc->sc_type) {
   3583 	case WM_T_ICH8:
    3584 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3585 		break;
   3586 	case WM_T_ICH9:
   3587 	case WM_T_ICH10:
   3588 	case WM_T_PCH:
   3589 		size = WM_RAL_TABSIZE_ICH8;
   3590 		break;
   3591 	case WM_T_PCH2:
   3592 		size = WM_RAL_TABSIZE_PCH2;
   3593 		break;
   3594 	case WM_T_PCH_LPT:
   3595 	case WM_T_PCH_SPT:
   3596 	case WM_T_PCH_CNP:
   3597 		size = WM_RAL_TABSIZE_PCH_LPT;
   3598 		break;
   3599 	case WM_T_82575:
   3600 		size = WM_RAL_TABSIZE_82575;
   3601 		break;
   3602 	case WM_T_82576:
   3603 	case WM_T_82580:
   3604 		size = WM_RAL_TABSIZE_82576;
   3605 		break;
   3606 	case WM_T_I350:
   3607 	case WM_T_I354:
   3608 		size = WM_RAL_TABSIZE_I350;
   3609 		break;
   3610 	default:
   3611 		size = WM_RAL_TABSIZE;
   3612 	}
   3613 
   3614 	return size;
   3615 }
   3616 
   3617 /*
   3618  * wm_set_filter:
   3619  *
   3620  *	Set up the receive filter.
   3621  */
   3622 static void
   3623 wm_set_filter(struct wm_softc *sc)
   3624 {
   3625 	struct ethercom *ec = &sc->sc_ethercom;
   3626 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3627 	struct ether_multi *enm;
   3628 	struct ether_multistep step;
   3629 	bus_addr_t mta_reg;
   3630 	uint32_t hash, reg, bit;
   3631 	int i, size, ralmax;
   3632 
   3633 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3634 		device_xname(sc->sc_dev), __func__));
   3635 
   3636 	if (sc->sc_type >= WM_T_82544)
   3637 		mta_reg = WMREG_CORDOVA_MTA;
   3638 	else
   3639 		mta_reg = WMREG_MTA;
   3640 
   3641 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3642 
   3643 	if (ifp->if_flags & IFF_BROADCAST)
   3644 		sc->sc_rctl |= RCTL_BAM;
   3645 	if (ifp->if_flags & IFF_PROMISC) {
   3646 		sc->sc_rctl |= RCTL_UPE;
   3647 		goto allmulti;
   3648 	}
   3649 
   3650 	/*
   3651 	 * Set the station address in the first RAL slot, and
   3652 	 * clear the remaining slots.
   3653 	 */
   3654 	size = wm_rar_count(sc);
   3655 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3656 
   3657 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3658 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3659 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3660 		switch (i) {
   3661 		case 0:
   3662 			/* We can use all entries */
   3663 			ralmax = size;
   3664 			break;
   3665 		case 1:
   3666 			/* Only RAR[0] */
   3667 			ralmax = 1;
   3668 			break;
   3669 		default:
   3670 			/* available SHRA + RAR[0] */
   3671 			ralmax = i + 1;
   3672 		}
   3673 	} else
   3674 		ralmax = size;
   3675 	for (i = 1; i < size; i++) {
   3676 		if (i < ralmax)
   3677 			wm_set_ral(sc, NULL, i);
   3678 	}
   3679 
   3680 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3681 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3682 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3683 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3684 		size = WM_ICH8_MC_TABSIZE;
   3685 	else
   3686 		size = WM_MC_TABSIZE;
   3687 	/* Clear out the multicast table. */
   3688 	for (i = 0; i < size; i++) {
   3689 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3690 		CSR_WRITE_FLUSH(sc);
   3691 	}
   3692 
   3693 	ETHER_LOCK(ec);
   3694 	ETHER_FIRST_MULTI(step, ec, enm);
   3695 	while (enm != NULL) {
   3696 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3697 			ETHER_UNLOCK(ec);
   3698 			/*
   3699 			 * We must listen to a range of multicast addresses.
   3700 			 * For now, just accept all multicasts, rather than
   3701 			 * trying to set only those filter bits needed to match
   3702 			 * the range.  (At this time, the only use of address
   3703 			 * ranges is for IP multicast routing, for which the
   3704 			 * range is big enough to require all bits set.)
   3705 			 */
   3706 			goto allmulti;
   3707 		}
   3708 
   3709 		hash = wm_mchash(sc, enm->enm_addrlo);
   3710 
   3711 		reg = (hash >> 5);
   3712 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3713 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3714 		    || (sc->sc_type == WM_T_PCH2)
   3715 		    || (sc->sc_type == WM_T_PCH_LPT)
   3716 		    || (sc->sc_type == WM_T_PCH_SPT)
   3717 		    || (sc->sc_type == WM_T_PCH_CNP))
   3718 			reg &= 0x1f;
   3719 		else
   3720 			reg &= 0x7f;
   3721 		bit = hash & 0x1f;
   3722 
   3723 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3724 		hash |= 1U << bit;
   3725 
   3726 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3727 			/*
    3728 			 * 82544 Errata 9: Certain registers cannot be written
   3729 			 * with particular alignments in PCI-X bus operation
   3730 			 * (FCAH, MTA and VFTA).
   3731 			 */
   3732 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3733 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3734 			CSR_WRITE_FLUSH(sc);
   3735 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3736 			CSR_WRITE_FLUSH(sc);
   3737 		} else {
   3738 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3739 			CSR_WRITE_FLUSH(sc);
   3740 		}
   3741 
   3742 		ETHER_NEXT_MULTI(step, enm);
   3743 	}
   3744 	ETHER_UNLOCK(ec);
   3745 
   3746 	ifp->if_flags &= ~IFF_ALLMULTI;
   3747 	goto setit;
   3748 
   3749  allmulti:
   3750 	ifp->if_flags |= IFF_ALLMULTI;
   3751 	sc->sc_rctl |= RCTL_MPE;
   3752 
   3753  setit:
   3754 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3755 }
   3756 
   3757 /* Reset and init related */
   3758 
   3759 static void
   3760 wm_set_vlan(struct wm_softc *sc)
   3761 {
   3762 
   3763 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3764 		device_xname(sc->sc_dev), __func__));
   3765 
   3766 	/* Deal with VLAN enables. */
   3767 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3768 		sc->sc_ctrl |= CTRL_VME;
   3769 	else
   3770 		sc->sc_ctrl &= ~CTRL_VME;
   3771 
   3772 	/* Write the control registers. */
   3773 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3774 }
   3775 
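         /*
          * wm_set_pcie_completion_timeout:
          *
          *	If the PCIe completion timeout is at its default of 0, set it
          *	to 10ms via GCR, or to 16ms via the PCIe Device Control 2
          *	register on capability version 2 devices.  Completion timeout
          *	resend is always disabled.
          */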
   3776 static void
   3777 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3778 {
   3779 	uint32_t gcr;
   3780 	pcireg_t ctrl2;
   3781 
   3782 	gcr = CSR_READ(sc, WMREG_GCR);
   3783 
   3784 	/* Only take action if timeout value is defaulted to 0 */
   3785 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3786 		goto out;
   3787 
   3788 	if ((gcr & GCR_CAP_VER2) == 0) {
   3789 		gcr |= GCR_CMPL_TMOUT_10MS;
   3790 		goto out;
   3791 	}
   3792 
   3793 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3794 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3795 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3796 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3797 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3798 
   3799 out:
   3800 	/* Disable completion timeout resend */
   3801 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3802 
   3803 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3804 }
   3805 
   3806 void
   3807 wm_get_auto_rd_done(struct wm_softc *sc)
   3808 {
   3809 	int i;
   3810 
    3811 	/* Wait for EEPROM to reload */
   3812 	switch (sc->sc_type) {
   3813 	case WM_T_82571:
   3814 	case WM_T_82572:
   3815 	case WM_T_82573:
   3816 	case WM_T_82574:
   3817 	case WM_T_82583:
   3818 	case WM_T_82575:
   3819 	case WM_T_82576:
   3820 	case WM_T_82580:
   3821 	case WM_T_I350:
   3822 	case WM_T_I354:
   3823 	case WM_T_I210:
   3824 	case WM_T_I211:
   3825 	case WM_T_80003:
   3826 	case WM_T_ICH8:
   3827 	case WM_T_ICH9:
   3828 		for (i = 0; i < 10; i++) {
   3829 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3830 				break;
   3831 			delay(1000);
   3832 		}
   3833 		if (i == 10) {
   3834 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3835 			    "complete\n", device_xname(sc->sc_dev));
   3836 		}
   3837 		break;
   3838 	default:
   3839 		break;
   3840 	}
   3841 }
   3842 
   3843 void
   3844 wm_lan_init_done(struct wm_softc *sc)
   3845 {
   3846 	uint32_t reg = 0;
   3847 	int i;
   3848 
   3849 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3850 		device_xname(sc->sc_dev), __func__));
   3851 
   3852 	/* Wait for eeprom to reload */
   3853 	switch (sc->sc_type) {
   3854 	case WM_T_ICH10:
   3855 	case WM_T_PCH:
   3856 	case WM_T_PCH2:
   3857 	case WM_T_PCH_LPT:
   3858 	case WM_T_PCH_SPT:
   3859 	case WM_T_PCH_CNP:
   3860 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3861 			reg = CSR_READ(sc, WMREG_STATUS);
   3862 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3863 				break;
   3864 			delay(100);
   3865 		}
   3866 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3867 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3868 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3869 		}
   3870 		break;
   3871 	default:
   3872 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3873 		    __func__);
   3874 		break;
   3875 	}
   3876 
   3877 	reg &= ~STATUS_LAN_INIT_DONE;
   3878 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3879 }
   3880 
   3881 void
   3882 wm_get_cfg_done(struct wm_softc *sc)
   3883 {
   3884 	int mask;
   3885 	uint32_t reg;
   3886 	int i;
   3887 
   3888 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3889 		device_xname(sc->sc_dev), __func__));
   3890 
   3891 	/* Wait for eeprom to reload */
   3892 	switch (sc->sc_type) {
   3893 	case WM_T_82542_2_0:
   3894 	case WM_T_82542_2_1:
   3895 		/* null */
   3896 		break;
   3897 	case WM_T_82543:
   3898 	case WM_T_82544:
   3899 	case WM_T_82540:
   3900 	case WM_T_82545:
   3901 	case WM_T_82545_3:
   3902 	case WM_T_82546:
   3903 	case WM_T_82546_3:
   3904 	case WM_T_82541:
   3905 	case WM_T_82541_2:
   3906 	case WM_T_82547:
   3907 	case WM_T_82547_2:
   3908 	case WM_T_82573:
   3909 	case WM_T_82574:
   3910 	case WM_T_82583:
   3911 		/* generic */
   3912 		delay(10*1000);
   3913 		break;
   3914 	case WM_T_80003:
   3915 	case WM_T_82571:
   3916 	case WM_T_82572:
   3917 	case WM_T_82575:
   3918 	case WM_T_82576:
   3919 	case WM_T_82580:
   3920 	case WM_T_I350:
   3921 	case WM_T_I354:
   3922 	case WM_T_I210:
   3923 	case WM_T_I211:
   3924 		if (sc->sc_type == WM_T_82571) {
   3925 			/* Only 82571 shares port 0 */
   3926 			mask = EEMNGCTL_CFGDONE_0;
   3927 		} else
   3928 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3929 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3930 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3931 				break;
   3932 			delay(1000);
   3933 		}
   3934 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3935 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3936 				device_xname(sc->sc_dev), __func__));
   3937 		}
   3938 		break;
   3939 	case WM_T_ICH8:
   3940 	case WM_T_ICH9:
   3941 	case WM_T_ICH10:
   3942 	case WM_T_PCH:
   3943 	case WM_T_PCH2:
   3944 	case WM_T_PCH_LPT:
   3945 	case WM_T_PCH_SPT:
   3946 	case WM_T_PCH_CNP:
   3947 		delay(10*1000);
   3948 		if (sc->sc_type >= WM_T_ICH10)
   3949 			wm_lan_init_done(sc);
   3950 		else
   3951 			wm_get_auto_rd_done(sc);
   3952 
   3953 		/* Clear PHY Reset Asserted bit */
   3954 		reg = CSR_READ(sc, WMREG_STATUS);
   3955 		if ((reg & STATUS_PHYRA) != 0)
   3956 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3957 		break;
   3958 	default:
   3959 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3960 		    __func__);
   3961 		break;
   3962 	}
   3963 }
   3964 
   3965 void
   3966 wm_phy_post_reset(struct wm_softc *sc)
   3967 {
   3968 	uint32_t reg;
   3969 
   3970 	/* This function is only for ICH8 and newer. */
   3971 	if (sc->sc_type < WM_T_ICH8)
   3972 		return;
   3973 
   3974 	if (wm_phy_resetisblocked(sc)) {
   3975 		/* XXX */
   3976 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3977 		return;
   3978 	}
   3979 
   3980 	/* Allow time for h/w to get to quiescent state after reset */
   3981 	delay(10*1000);
   3982 
   3983 	/* Perform any necessary post-reset workarounds */
   3984 	if (sc->sc_type == WM_T_PCH)
   3985 		wm_hv_phy_workarounds_ich8lan(sc);
   3986 	else if (sc->sc_type == WM_T_PCH2)
   3987 		wm_lv_phy_workarounds_ich8lan(sc);
   3988 
   3989 	/* Clear the host wakeup bit after lcd reset */
   3990 	if (sc->sc_type >= WM_T_PCH) {
   3991 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3992 		    BM_PORT_GEN_CFG);
   3993 		reg &= ~BM_WUC_HOST_WU_BIT;
   3994 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3995 		    BM_PORT_GEN_CFG, reg);
   3996 	}
   3997 
   3998 	/* Configure the LCD with the extended configuration region in NVM */
   3999 	wm_init_lcd_from_nvm(sc);
   4000 
   4001 	/* Configure the LCD with the OEM bits in NVM */
   4002 	wm_oem_bits_config_ich8lan(sc, true);
   4003 
   4004 	if (sc->sc_type == WM_T_PCH2) {
   4005 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4006 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4007 			delay(10 * 1000);
   4008 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4009 		}
   4010 		/* XXX Set EEE LPI Update Timer to 200usec */
   4011 	}
   4012 }
   4013 
   4014 /* Only for PCH and newer */
   4015 static int
   4016 wm_write_smbus_addr(struct wm_softc *sc)
   4017 {
   4018 	uint32_t strap, freq;
   4019 	uint16_t phy_data;
   4020 	int rv;
   4021 
   4022 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4023 		device_xname(sc->sc_dev), __func__));
   4024 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4025 
   4026 	strap = CSR_READ(sc, WMREG_STRAP);
   4027 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4028 
   4029 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4030 	if (rv != 0)
   4031 		return -1;
   4032 
   4033 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4034 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4035 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4036 
   4037 	if (sc->sc_phytype == WMPHY_I217) {
   4038 		/* Restore SMBus frequency */
    4039 		if (freq--) {
   4040 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4041 			    | HV_SMB_ADDR_FREQ_HIGH);
   4042 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4043 			    HV_SMB_ADDR_FREQ_LOW);
   4044 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4045 			    HV_SMB_ADDR_FREQ_HIGH);
   4046 		} else {
   4047 			DPRINTF(WM_DEBUG_INIT,
   4048 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4049 				device_xname(sc->sc_dev), __func__));
   4050 		}
   4051 	}
   4052 
   4053 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4054 	    phy_data);
   4055 }
   4056 
   4057 void
   4058 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4059 {
   4060 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4061 	uint16_t phy_page = 0;
   4062 
   4063 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4064 		device_xname(sc->sc_dev), __func__));
   4065 
   4066 	switch (sc->sc_type) {
   4067 	case WM_T_ICH8:
   4068 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4069 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4070 			return;
   4071 
   4072 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4073 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4074 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4075 			break;
   4076 		}
   4077 		/* FALLTHROUGH */
   4078 	case WM_T_PCH:
   4079 	case WM_T_PCH2:
   4080 	case WM_T_PCH_LPT:
   4081 	case WM_T_PCH_SPT:
   4082 	case WM_T_PCH_CNP:
   4083 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4084 		break;
   4085 	default:
   4086 		return;
   4087 	}
   4088 
   4089 	sc->phy.acquire(sc);
   4090 
   4091 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4092 	if ((reg & sw_cfg_mask) == 0)
   4093 		goto release;
   4094 
   4095 	/*
   4096 	 * Make sure HW does not configure LCD from PHY extended configuration
   4097 	 * before SW configuration
   4098 	 */
   4099 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4100 	if ((sc->sc_type < WM_T_PCH2)
   4101 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4102 		goto release;
   4103 
   4104 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4105 		device_xname(sc->sc_dev), __func__));
   4106 	/* word_addr is in DWORD */
   4107 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4108 
   4109 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4110 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4111 	if (cnf_size == 0)
   4112 		goto release;
   4113 
   4114 	if (((sc->sc_type == WM_T_PCH)
   4115 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4116 	    || (sc->sc_type > WM_T_PCH)) {
   4117 		/*
   4118 		 * HW configures the SMBus address and LEDs when the OEM and
   4119 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4120 		 * are cleared, SW will configure them instead.
   4121 		 */
   4122 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4123 			device_xname(sc->sc_dev), __func__));
   4124 		wm_write_smbus_addr(sc);
   4125 
   4126 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4127 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4128 	}
   4129 
   4130 	/* Configure LCD from extended configuration region. */
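         	/*
         	 * Each entry in the region is a (data, address) word pair; a
         	 * write to MII_IGPHY_PAGE_SELECT switches the PHY page used
         	 * for the register addresses that follow.
         	 */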
   4131 	for (i = 0; i < cnf_size; i++) {
   4132 		uint16_t reg_data, reg_addr;
   4133 
   4134 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4135 			goto release;
   4136 
   4137 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4138 			goto release;
   4139 
   4140 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4141 			phy_page = reg_data;
   4142 
   4143 		reg_addr &= IGPHY_MAXREGADDR;
   4144 		reg_addr |= phy_page;
   4145 
   4146 		KASSERT(sc->phy.writereg_locked != NULL);
   4147 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4148 	}
   4149 
   4150 release:
   4151 	sc->phy.release(sc);
   4152 	return;
   4153 }
   4154 
   4155 /*
   4156  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4157  *  @sc:       pointer to the HW structure
   4158  *  @d0_state: boolean if entering d0 or d3 device state
   4159  *
   4160  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4161  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4162  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4163  */
   4164 int
   4165 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4166 {
   4167 	uint32_t mac_reg;
   4168 	uint16_t oem_reg;
   4169 	int rv;
   4170 
   4171 	if (sc->sc_type < WM_T_PCH)
   4172 		return 0;
   4173 
   4174 	rv = sc->phy.acquire(sc);
   4175 	if (rv != 0)
   4176 		return rv;
   4177 
   4178 	if (sc->sc_type == WM_T_PCH) {
   4179 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4180 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4181 			goto release;
   4182 	}
   4183 
   4184 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4185 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4186 		goto release;
   4187 
   4188 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4189 
   4190 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4191 	if (rv != 0)
   4192 		goto release;
   4193 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4194 
   4195 	if (d0_state) {
   4196 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4197 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4198 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4199 			oem_reg |= HV_OEM_BITS_LPLU;
   4200 	} else {
   4201 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4202 		    != 0)
   4203 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4204 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4205 		    != 0)
   4206 			oem_reg |= HV_OEM_BITS_LPLU;
   4207 	}
   4208 
   4209 	/* Set Restart auto-neg to activate the bits */
   4210 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4211 	    && (wm_phy_resetisblocked(sc) == false))
   4212 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4213 
   4214 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4215 
   4216 release:
   4217 	sc->phy.release(sc);
   4218 
   4219 	return rv;
   4220 }
   4221 
   4222 /* Init hardware bits */
   4223 void
   4224 wm_initialize_hardware_bits(struct wm_softc *sc)
   4225 {
   4226 	uint32_t tarc0, tarc1, reg;
   4227 
   4228 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4229 		device_xname(sc->sc_dev), __func__));
   4230 
   4231 	/* For 82571 variant, 80003 and ICHs */
   4232 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4233 	    || (sc->sc_type >= WM_T_80003)) {
   4234 
   4235 		/* Transmit Descriptor Control 0 */
   4236 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4237 		reg |= TXDCTL_COUNT_DESC;
   4238 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4239 
   4240 		/* Transmit Descriptor Control 1 */
   4241 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4242 		reg |= TXDCTL_COUNT_DESC;
   4243 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4244 
   4245 		/* TARC0 */
   4246 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4247 		switch (sc->sc_type) {
   4248 		case WM_T_82571:
   4249 		case WM_T_82572:
   4250 		case WM_T_82573:
   4251 		case WM_T_82574:
   4252 		case WM_T_82583:
   4253 		case WM_T_80003:
   4254 			/* Clear bits 30..27 */
   4255 			tarc0 &= ~__BITS(30, 27);
   4256 			break;
   4257 		default:
   4258 			break;
   4259 		}
   4260 
   4261 		switch (sc->sc_type) {
   4262 		case WM_T_82571:
   4263 		case WM_T_82572:
   4264 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4265 
   4266 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4267 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4268 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4269 			/* 8257[12] Errata No.7 */
    4270 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4271 
   4272 			/* TARC1 bit 28 */
   4273 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4274 				tarc1 &= ~__BIT(28);
   4275 			else
   4276 				tarc1 |= __BIT(28);
   4277 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4278 
   4279 			/*
   4280 			 * 8257[12] Errata No.13
    4281 			 * Disable Dynamic Clock Gating.
   4282 			 */
   4283 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4284 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4285 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4286 			break;
   4287 		case WM_T_82573:
   4288 		case WM_T_82574:
   4289 		case WM_T_82583:
   4290 			if ((sc->sc_type == WM_T_82574)
   4291 			    || (sc->sc_type == WM_T_82583))
   4292 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4293 
   4294 			/* Extended Device Control */
   4295 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4296 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4297 			reg |= __BIT(22);	/* Set bit 22 */
   4298 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4299 
   4300 			/* Device Control */
   4301 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4302 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4303 
   4304 			/* PCIe Control Register */
   4305 			/*
   4306 			 * 82573 Errata (unknown).
   4307 			 *
   4308 			 * 82574 Errata 25 and 82583 Errata 12
   4309 			 * "Dropped Rx Packets":
    4310 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4311 			 */
   4312 			reg = CSR_READ(sc, WMREG_GCR);
   4313 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4314 			CSR_WRITE(sc, WMREG_GCR, reg);
   4315 
   4316 			if ((sc->sc_type == WM_T_82574)
   4317 			    || (sc->sc_type == WM_T_82583)) {
   4318 				/*
   4319 				 * Document says this bit must be set for
   4320 				 * proper operation.
   4321 				 */
   4322 				reg = CSR_READ(sc, WMREG_GCR);
   4323 				reg |= __BIT(22);
   4324 				CSR_WRITE(sc, WMREG_GCR, reg);
   4325 
   4326 				/*
    4327 				 * Apply workaround for hardware errata
    4328 				 * documented in the errata docs.  It fixes an
    4329 				 * issue where some error-prone or unreliable
    4330 				 * PCIe completions occur, particularly with
    4331 				 * ASPM enabled.  Without the fix, the issue
    4332 				 * can cause Tx timeouts.
   4333 				 */
   4334 				reg = CSR_READ(sc, WMREG_GCR2);
   4335 				reg |= __BIT(0);
   4336 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4337 			}
   4338 			break;
   4339 		case WM_T_80003:
   4340 			/* TARC0 */
   4341 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4342 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4343 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4344 
   4345 			/* TARC1 bit 28 */
   4346 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4347 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4348 				tarc1 &= ~__BIT(28);
   4349 			else
   4350 				tarc1 |= __BIT(28);
   4351 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4352 			break;
   4353 		case WM_T_ICH8:
   4354 		case WM_T_ICH9:
   4355 		case WM_T_ICH10:
   4356 		case WM_T_PCH:
   4357 		case WM_T_PCH2:
   4358 		case WM_T_PCH_LPT:
   4359 		case WM_T_PCH_SPT:
   4360 		case WM_T_PCH_CNP:
   4361 			/* TARC0 */
   4362 			if (sc->sc_type == WM_T_ICH8) {
   4363 				/* Set TARC0 bits 29 and 28 */
   4364 				tarc0 |= __BITS(29, 28);
   4365 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4366 				tarc0 |= __BIT(29);
   4367 				/*
   4368 				 *  Drop bit 28. From Linux.
   4369 				 * See I218/I219 spec update
   4370 				 * "5. Buffer Overrun While the I219 is
   4371 				 * Processing DMA Transactions"
   4372 				 */
   4373 				tarc0 &= ~__BIT(28);
   4374 			}
   4375 			/* Set TARC0 bits 23,24,26,27 */
   4376 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4377 
   4378 			/* CTRL_EXT */
   4379 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4380 			reg |= __BIT(22);	/* Set bit 22 */
   4381 			/*
   4382 			 * Enable PHY low-power state when MAC is at D3
   4383 			 * w/o WoL
   4384 			 */
   4385 			if (sc->sc_type >= WM_T_PCH)
   4386 				reg |= CTRL_EXT_PHYPDEN;
   4387 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4388 
   4389 			/* TARC1 */
   4390 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4391 			/* bit 28 */
   4392 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4393 				tarc1 &= ~__BIT(28);
   4394 			else
   4395 				tarc1 |= __BIT(28);
   4396 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4397 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4398 
   4399 			/* Device Status */
   4400 			if (sc->sc_type == WM_T_ICH8) {
   4401 				reg = CSR_READ(sc, WMREG_STATUS);
   4402 				reg &= ~__BIT(31);
   4403 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4404 
   4405 			}
   4406 
   4407 			/* IOSFPC */
   4408 			if (sc->sc_type == WM_T_PCH_SPT) {
   4409 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4410 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4411 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4412 			}
   4413 			/*
   4414 			 * Work-around descriptor data corruption issue during
   4415 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4416 			 * capability.
   4417 			 */
   4418 			reg = CSR_READ(sc, WMREG_RFCTL);
   4419 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4420 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4421 			break;
   4422 		default:
   4423 			break;
   4424 		}
   4425 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4426 
   4427 		switch (sc->sc_type) {
   4428 		/*
   4429 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4430 		 * Avoid RSS Hash Value bug.
   4431 		 */
   4432 		case WM_T_82571:
   4433 		case WM_T_82572:
   4434 		case WM_T_82573:
   4435 		case WM_T_80003:
   4436 		case WM_T_ICH8:
   4437 			reg = CSR_READ(sc, WMREG_RFCTL);
   4438 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4439 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4440 			break;
   4441 		case WM_T_82574:
    4442 			/* Use extended Rx descriptors. */
   4443 			reg = CSR_READ(sc, WMREG_RFCTL);
   4444 			reg |= WMREG_RFCTL_EXSTEN;
   4445 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4446 			break;
   4447 		default:
   4448 			break;
   4449 		}
   4450 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4451 		/*
   4452 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4453 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4454 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4455 		 * Correctly by the Device"
   4456 		 *
   4457 		 * I354(C2000) Errata AVR53:
   4458 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4459 		 * Hang"
   4460 		 */
   4461 		reg = CSR_READ(sc, WMREG_RFCTL);
   4462 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4463 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4464 	}
   4465 }
   4466 
   4467 static uint32_t
   4468 wm_rxpbs_adjust_82580(uint32_t val)
   4469 {
   4470 	uint32_t rv = 0;
   4471 
   4472 	if (val < __arraycount(wm_82580_rxpbs_table))
   4473 		rv = wm_82580_rxpbs_table[val];
   4474 
   4475 	return rv;
   4476 }
   4477 
   4478 /*
   4479  * wm_reset_phy:
   4480  *
   4481  *	generic PHY reset function.
   4482  *	Same as e1000_phy_hw_reset_generic()
   4483  */
   4484 static int
   4485 wm_reset_phy(struct wm_softc *sc)
   4486 {
   4487 	uint32_t reg;
   4488 
   4489 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4490 		device_xname(sc->sc_dev), __func__));
   4491 	if (wm_phy_resetisblocked(sc))
   4492 		return -1;
   4493 
   4494 	sc->phy.acquire(sc);
   4495 
   4496 	reg = CSR_READ(sc, WMREG_CTRL);
   4497 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4498 	CSR_WRITE_FLUSH(sc);
   4499 
   4500 	delay(sc->phy.reset_delay_us);
   4501 
   4502 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4503 	CSR_WRITE_FLUSH(sc);
   4504 
   4505 	delay(150);
   4506 
   4507 	sc->phy.release(sc);
   4508 
   4509 	wm_get_cfg_done(sc);
   4510 	wm_phy_post_reset(sc);
   4511 
   4512 	return 0;
   4513 }
   4514 
   4515 /*
   4516  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4517  * so it is enough to check sc->sc_queue[0] only.
   4518  */
   4519 static void
   4520 wm_flush_desc_rings(struct wm_softc *sc)
   4521 {
   4522 	pcireg_t preg;
   4523 	uint32_t reg;
   4524 	struct wm_txqueue *txq;
   4525 	wiseman_txdesc_t *txd;
   4526 	int nexttx;
   4527 	uint32_t rctl;
   4528 
   4529 	/* First, disable MULR fix in FEXTNVM11 */
   4530 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4531 	reg |= FEXTNVM11_DIS_MULRFIX;
   4532 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4533 
   4534 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4535 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4536 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4537 		return;
   4538 
   4539 	/* TX */
   4540 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4541 	    device_xname(sc->sc_dev), preg, reg);
   4542 	reg = CSR_READ(sc, WMREG_TCTL);
   4543 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4544 
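         	/*
         	 * Post one dummy 512-byte descriptor and bump TDT below so
         	 * that the hardware drains its pending Tx DMA; the flush
         	 * request bit is then re-checked before touching the RX side.
         	 */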
   4545 	txq = &sc->sc_queue[0].wmq_txq;
   4546 	nexttx = txq->txq_next;
   4547 	txd = &txq->txq_descs[nexttx];
   4548 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4549 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4550 	txd->wtx_fields.wtxu_status = 0;
   4551 	txd->wtx_fields.wtxu_options = 0;
   4552 	txd->wtx_fields.wtxu_vlan = 0;
   4553 
   4554 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4555 	    BUS_SPACE_BARRIER_WRITE);
   4556 
   4557 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4558 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4559 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4560 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4561 	delay(250);
   4562 
   4563 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4564 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4565 		return;
   4566 
   4567 	/* RX */
   4568 	printf("%s: Need RX flush (reg = %08x)\n",
   4569 	    device_xname(sc->sc_dev), preg);
   4570 	rctl = CSR_READ(sc, WMREG_RCTL);
   4571 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4572 	CSR_WRITE_FLUSH(sc);
   4573 	delay(150);
   4574 
   4575 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4576 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4577 	reg &= 0xffffc000;
   4578 	/*
   4579 	 * update thresholds: prefetch threshold to 31, host threshold
   4580 	 * to 1 and make sure the granularity is "descriptors" and not
   4581 	 * "cache lines"
   4582 	 */
   4583 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4584 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4585 
   4586 	/*
   4587 	 * momentarily enable the RX ring for the changes to take
   4588 	 * effect
   4589 	 */
   4590 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4591 	CSR_WRITE_FLUSH(sc);
   4592 	delay(150);
   4593 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4594 }
   4595 
   4596 /*
   4597  * wm_reset:
   4598  *
   4599  *	Reset the i82542 chip.
   4600  */
   4601 static void
   4602 wm_reset(struct wm_softc *sc)
   4603 {
   4604 	int phy_reset = 0;
   4605 	int i, error = 0;
   4606 	uint32_t reg;
   4607 	uint16_t kmreg;
   4608 	int rv;
   4609 
   4610 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4611 		device_xname(sc->sc_dev), __func__));
   4612 	KASSERT(sc->sc_type != 0);
   4613 
   4614 	/*
   4615 	 * Allocate on-chip memory according to the MTU size.
   4616 	 * The Packet Buffer Allocation register must be written
   4617 	 * before the chip is reset.
   4618 	 */
   4619 	switch (sc->sc_type) {
   4620 	case WM_T_82547:
   4621 	case WM_T_82547_2:
   4622 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4623 		    PBA_22K : PBA_30K;
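		/*
		 * The Tx FIFO occupies the part of the packet buffer above
		 * the Rx allocation; record its base and size here for the
		 * 82547 Tx FIFO stall workaround.
		 */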
   4624 		for (i = 0; i < sc->sc_nqueues; i++) {
   4625 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4626 			txq->txq_fifo_head = 0;
   4627 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4628 			txq->txq_fifo_size =
   4629 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4630 			txq->txq_fifo_stall = 0;
   4631 		}
   4632 		break;
   4633 	case WM_T_82571:
   4634 	case WM_T_82572:
   4635 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4636 	case WM_T_80003:
   4637 		sc->sc_pba = PBA_32K;
   4638 		break;
   4639 	case WM_T_82573:
   4640 		sc->sc_pba = PBA_12K;
   4641 		break;
   4642 	case WM_T_82574:
   4643 	case WM_T_82583:
   4644 		sc->sc_pba = PBA_20K;
   4645 		break;
   4646 	case WM_T_82576:
   4647 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4648 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4649 		break;
   4650 	case WM_T_82580:
   4651 	case WM_T_I350:
   4652 	case WM_T_I354:
   4653 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4654 		break;
   4655 	case WM_T_I210:
   4656 	case WM_T_I211:
   4657 		sc->sc_pba = PBA_34K;
   4658 		break;
   4659 	case WM_T_ICH8:
   4660 		/* Workaround for a bit corruption issue in FIFO memory */
   4661 		sc->sc_pba = PBA_8K;
   4662 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4663 		break;
   4664 	case WM_T_ICH9:
   4665 	case WM_T_ICH10:
   4666 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4667 		    PBA_14K : PBA_10K;
   4668 		break;
   4669 	case WM_T_PCH:
   4670 	case WM_T_PCH2:	/* XXX 14K? */
   4671 	case WM_T_PCH_LPT:
   4672 	case WM_T_PCH_SPT:
   4673 	case WM_T_PCH_CNP:
   4674 		sc->sc_pba = PBA_26K;
   4675 		break;
   4676 	default:
   4677 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4678 		    PBA_40K : PBA_48K;
   4679 		break;
   4680 	}
   4681 	/*
   4682 	 * Only old or non-multiqueue devices have the PBA register
   4683 	 * XXX Need special handling for 82575.
   4684 	 */
   4685 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4686 	    || (sc->sc_type == WM_T_82575))
   4687 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4688 
   4689 	/* Prevent the PCI-E bus from sticking */
   4690 	if (sc->sc_flags & WM_F_PCIE) {
   4691 		int timeout = 800;
   4692 
   4693 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4694 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4695 
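		/*
		 * Wait up to 800 * 100us = 80ms for the GIO master enable
		 * bit to clear before proceeding with the reset.
		 */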
   4696 		while (timeout--) {
   4697 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4698 			    == 0)
   4699 				break;
   4700 			delay(100);
   4701 		}
   4702 		if (timeout == 0)
   4703 			device_printf(sc->sc_dev,
   4704 			    "failed to disable busmastering\n");
   4705 	}
   4706 
   4707 	/* Set the completion timeout for interface */
   4708 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4709 	    || (sc->sc_type == WM_T_82580)
   4710 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4711 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4712 		wm_set_pcie_completion_timeout(sc);
   4713 
   4714 	/* Clear interrupt */
   4715 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4716 	if (wm_is_using_msix(sc)) {
   4717 		if (sc->sc_type != WM_T_82574) {
   4718 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4719 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4720 		} else
   4721 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4722 	}
   4723 
   4724 	/* Stop the transmit and receive processes. */
   4725 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4726 	sc->sc_rctl &= ~RCTL_EN;
   4727 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4728 	CSR_WRITE_FLUSH(sc);
   4729 
   4730 	/* XXX set_tbi_sbp_82543() */
   4731 
   4732 	delay(10*1000);
   4733 
   4734 	/* Must acquire the MDIO ownership before MAC reset */
   4735 	switch (sc->sc_type) {
   4736 	case WM_T_82573:
   4737 	case WM_T_82574:
   4738 	case WM_T_82583:
   4739 		error = wm_get_hw_semaphore_82573(sc);
   4740 		break;
   4741 	default:
   4742 		break;
   4743 	}
   4744 
   4745 	/*
   4746 	 * 82541 Errata 29? & 82547 Errata 28?
   4747 	 * See also the description about PHY_RST bit in CTRL register
   4748 	 * in 8254x_GBe_SDM.pdf.
   4749 	 */
   4750 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4751 		CSR_WRITE(sc, WMREG_CTRL,
   4752 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4753 		CSR_WRITE_FLUSH(sc);
   4754 		delay(5000);
   4755 	}
   4756 
   4757 	switch (sc->sc_type) {
   4758 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4759 	case WM_T_82541:
   4760 	case WM_T_82541_2:
   4761 	case WM_T_82547:
   4762 	case WM_T_82547_2:
   4763 		/*
   4764 		 * On some chipsets, a reset through a memory-mapped write
   4765 		 * cycle can cause the chip to reset before completing the
    4766 		 * write cycle. This causes a major headache that can be avoided
   4767 		 * by issuing the reset via indirect register writes through
   4768 		 * I/O space.
   4769 		 *
   4770 		 * So, if we successfully mapped the I/O BAR at attach time,
   4771 		 * use that. Otherwise, try our luck with a memory-mapped
   4772 		 * reset.
   4773 		 */
   4774 		if (sc->sc_flags & WM_F_IOH_VALID)
   4775 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4776 		else
   4777 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4778 		break;
   4779 	case WM_T_82545_3:
   4780 	case WM_T_82546_3:
   4781 		/* Use the shadow control register on these chips. */
   4782 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4783 		break;
   4784 	case WM_T_80003:
   4785 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4786 		sc->phy.acquire(sc);
   4787 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4788 		sc->phy.release(sc);
   4789 		break;
   4790 	case WM_T_ICH8:
   4791 	case WM_T_ICH9:
   4792 	case WM_T_ICH10:
   4793 	case WM_T_PCH:
   4794 	case WM_T_PCH2:
   4795 	case WM_T_PCH_LPT:
   4796 	case WM_T_PCH_SPT:
   4797 	case WM_T_PCH_CNP:
   4798 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4799 		if (wm_phy_resetisblocked(sc) == false) {
   4800 			/*
   4801 			 * Gate automatic PHY configuration by hardware on
   4802 			 * non-managed 82579
   4803 			 */
   4804 			if ((sc->sc_type == WM_T_PCH2)
   4805 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4806 				== 0))
   4807 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4808 
   4809 			reg |= CTRL_PHY_RESET;
   4810 			phy_reset = 1;
   4811 		} else
   4812 			printf("XXX reset is blocked!!!\n");
   4813 		sc->phy.acquire(sc);
   4814 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4815 		/* Don't insert a completion barrier when resetting */
   4816 		delay(20*1000);
   4817 		mutex_exit(sc->sc_ich_phymtx);
   4818 		break;
   4819 	case WM_T_82580:
   4820 	case WM_T_I350:
   4821 	case WM_T_I354:
   4822 	case WM_T_I210:
   4823 	case WM_T_I211:
   4824 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4825 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4826 			CSR_WRITE_FLUSH(sc);
   4827 		delay(5000);
   4828 		break;
   4829 	case WM_T_82542_2_0:
   4830 	case WM_T_82542_2_1:
   4831 	case WM_T_82543:
   4832 	case WM_T_82540:
   4833 	case WM_T_82545:
   4834 	case WM_T_82546:
   4835 	case WM_T_82571:
   4836 	case WM_T_82572:
   4837 	case WM_T_82573:
   4838 	case WM_T_82574:
   4839 	case WM_T_82575:
   4840 	case WM_T_82576:
   4841 	case WM_T_82583:
   4842 	default:
   4843 		/* Everything else can safely use the documented method. */
   4844 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4845 		break;
   4846 	}
   4847 
   4848 	/* Must release the MDIO ownership after MAC reset */
   4849 	switch (sc->sc_type) {
   4850 	case WM_T_82573:
   4851 	case WM_T_82574:
   4852 	case WM_T_82583:
   4853 		if (error == 0)
   4854 			wm_put_hw_semaphore_82573(sc);
   4855 		break;
   4856 	default:
   4857 		break;
   4858 	}
   4859 
   4860 	/* Set Phy Config Counter to 50msec */
   4861 	if (sc->sc_type == WM_T_PCH2) {
   4862 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4863 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4864 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4865 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4866 	}
   4867 
   4868 	if (phy_reset != 0)
   4869 		wm_get_cfg_done(sc);
   4870 
   4871 	/* reload EEPROM */
   4872 	switch (sc->sc_type) {
   4873 	case WM_T_82542_2_0:
   4874 	case WM_T_82542_2_1:
   4875 	case WM_T_82543:
   4876 	case WM_T_82544:
   4877 		delay(10);
   4878 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4879 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4880 		CSR_WRITE_FLUSH(sc);
   4881 		delay(2000);
   4882 		break;
   4883 	case WM_T_82540:
   4884 	case WM_T_82545:
   4885 	case WM_T_82545_3:
   4886 	case WM_T_82546:
   4887 	case WM_T_82546_3:
   4888 		delay(5*1000);
   4889 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4890 		break;
   4891 	case WM_T_82541:
   4892 	case WM_T_82541_2:
   4893 	case WM_T_82547:
   4894 	case WM_T_82547_2:
   4895 		delay(20000);
   4896 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4897 		break;
   4898 	case WM_T_82571:
   4899 	case WM_T_82572:
   4900 	case WM_T_82573:
   4901 	case WM_T_82574:
   4902 	case WM_T_82583:
   4903 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4904 			delay(10);
   4905 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4906 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4907 			CSR_WRITE_FLUSH(sc);
   4908 		}
   4909 		/* check EECD_EE_AUTORD */
   4910 		wm_get_auto_rd_done(sc);
   4911 		/*
   4912 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4913 		 * is set.
   4914 		 */
   4915 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4916 		    || (sc->sc_type == WM_T_82583))
   4917 			delay(25*1000);
   4918 		break;
   4919 	case WM_T_82575:
   4920 	case WM_T_82576:
   4921 	case WM_T_82580:
   4922 	case WM_T_I350:
   4923 	case WM_T_I354:
   4924 	case WM_T_I210:
   4925 	case WM_T_I211:
   4926 	case WM_T_80003:
   4927 		/* check EECD_EE_AUTORD */
   4928 		wm_get_auto_rd_done(sc);
   4929 		break;
   4930 	case WM_T_ICH8:
   4931 	case WM_T_ICH9:
   4932 	case WM_T_ICH10:
   4933 	case WM_T_PCH:
   4934 	case WM_T_PCH2:
   4935 	case WM_T_PCH_LPT:
   4936 	case WM_T_PCH_SPT:
   4937 	case WM_T_PCH_CNP:
   4938 		break;
   4939 	default:
   4940 		panic("%s: unknown type\n", __func__);
   4941 	}
   4942 
   4943 	/* Check whether EEPROM is present or not */
   4944 	switch (sc->sc_type) {
   4945 	case WM_T_82575:
   4946 	case WM_T_82576:
   4947 	case WM_T_82580:
   4948 	case WM_T_I350:
   4949 	case WM_T_I354:
   4950 	case WM_T_ICH8:
   4951 	case WM_T_ICH9:
   4952 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4953 			/* Not found */
   4954 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4955 			if (sc->sc_type == WM_T_82575)
   4956 				wm_reset_init_script_82575(sc);
   4957 		}
   4958 		break;
   4959 	default:
   4960 		break;
   4961 	}
   4962 
   4963 	if (phy_reset != 0)
   4964 		wm_phy_post_reset(sc);
   4965 
   4966 	if ((sc->sc_type == WM_T_82580)
   4967 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4968 		/* clear global device reset status bit */
   4969 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4970 	}
   4971 
   4972 	/* Clear any pending interrupt events. */
   4973 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4974 	reg = CSR_READ(sc, WMREG_ICR);
   4975 	if (wm_is_using_msix(sc)) {
   4976 		if (sc->sc_type != WM_T_82574) {
   4977 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4978 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4979 		} else
   4980 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4981 	}
   4982 
   4983 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4984 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4985 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4986 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4987 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4988 		reg |= KABGTXD_BGSQLBIAS;
   4989 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4990 	}
   4991 
   4992 	/* reload sc_ctrl */
   4993 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4994 
   4995 	if (sc->sc_type == WM_T_I354) {
   4996 #if 0
   4997 		/* I354 uses an external PHY */
   4998 		wm_set_eee_i354(sc);
   4999 #endif
   5000 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   5001 		wm_set_eee_i350(sc);
   5002 
   5003 	/*
   5004 	 * For PCH, this write will make sure that any noise will be detected
   5005 	 * as a CRC error and be dropped rather than show up as a bad packet
   5006 	 * to the DMA engine
   5007 	 */
   5008 	if (sc->sc_type == WM_T_PCH)
   5009 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5010 
   5011 	if (sc->sc_type >= WM_T_82544)
   5012 		CSR_WRITE(sc, WMREG_WUC, 0);
   5013 
   5014 	if (sc->sc_type < WM_T_82575)
   5015 		wm_disable_aspm(sc); /* Workaround for some chips */
   5016 
   5017 	wm_reset_mdicnfg_82580(sc);
   5018 
   5019 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5020 		wm_pll_workaround_i210(sc);
   5021 
   5022 	if (sc->sc_type == WM_T_80003) {
   5023 		/* default to TRUE to enable the MDIC W/A */
   5024 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5025 
   5026 		rv = wm_kmrn_readreg(sc,
   5027 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5028 		if (rv == 0) {
   5029 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5030 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5031 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5032 			else
   5033 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5034 		}
   5035 	}
   5036 }
   5037 
   5038 /*
   5039  * wm_add_rxbuf:
   5040  *
    5041  *	Add a receive buffer to the indicated descriptor.
   5042  */
   5043 static int
   5044 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5045 {
   5046 	struct wm_softc *sc = rxq->rxq_sc;
   5047 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5048 	struct mbuf *m;
   5049 	int error;
   5050 
   5051 	KASSERT(mutex_owned(rxq->rxq_lock));
   5052 
   5053 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5054 	if (m == NULL)
   5055 		return ENOBUFS;
   5056 
   5057 	MCLGET(m, M_DONTWAIT);
   5058 	if ((m->m_flags & M_EXT) == 0) {
   5059 		m_freem(m);
   5060 		return ENOBUFS;
   5061 	}
   5062 
   5063 	if (rxs->rxs_mbuf != NULL)
   5064 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5065 
   5066 	rxs->rxs_mbuf = m;
   5067 
   5068 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
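	/*
	 * Offer the whole cluster to the chip; the Rx path later trims
	 * the mbuf to the received length taken from the descriptor.
	 */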
   5069 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5070 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5071 	if (error) {
   5072 		/* XXX XXX XXX */
   5073 		aprint_error_dev(sc->sc_dev,
   5074 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5075 		panic("wm_add_rxbuf");
   5076 	}
   5077 
   5078 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5079 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5080 
   5081 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5082 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5083 			wm_init_rxdesc(rxq, idx);
   5084 	} else
   5085 		wm_init_rxdesc(rxq, idx);
   5086 
   5087 	return 0;
   5088 }
   5089 
   5090 /*
   5091  * wm_rxdrain:
   5092  *
   5093  *	Drain the receive queue.
   5094  */
   5095 static void
   5096 wm_rxdrain(struct wm_rxqueue *rxq)
   5097 {
   5098 	struct wm_softc *sc = rxq->rxq_sc;
   5099 	struct wm_rxsoft *rxs;
   5100 	int i;
   5101 
   5102 	KASSERT(mutex_owned(rxq->rxq_lock));
   5103 
   5104 	for (i = 0; i < WM_NRXDESC; i++) {
   5105 		rxs = &rxq->rxq_soft[i];
   5106 		if (rxs->rxs_mbuf != NULL) {
   5107 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5108 			m_freem(rxs->rxs_mbuf);
   5109 			rxs->rxs_mbuf = NULL;
   5110 		}
   5111 	}
   5112 }
   5113 
   5114 /*
   5115  * Setup registers for RSS.
   5116  *
    5117  * XXX VMDq is not supported yet.
   5118  */
   5119 static void
   5120 wm_init_rss(struct wm_softc *sc)
   5121 {
   5122 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5123 	int i;
   5124 
   5125 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5126 
   5127 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5128 		int qid, reta_ent;
   5129 
   5130 		qid  = i % sc->sc_nqueues;
   5131 		switch (sc->sc_type) {
   5132 		case WM_T_82574:
   5133 			reta_ent = __SHIFTIN(qid,
   5134 			    RETA_ENT_QINDEX_MASK_82574);
   5135 			break;
   5136 		case WM_T_82575:
   5137 			reta_ent = __SHIFTIN(qid,
   5138 			    RETA_ENT_QINDEX1_MASK_82575);
   5139 			break;
   5140 		default:
   5141 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5142 			break;
   5143 		}
   5144 
   5145 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5146 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5147 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5148 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5149 	}
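	/*
	 * Example: with sc_nqueues == 4, the redirection table entries
	 * cycle through queues 0, 1, 2, 3, so hashed flows are spread
	 * evenly across all queues.
	 */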
   5150 
   5151 	rss_getkey((uint8_t *)rss_key);
   5152 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5153 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5154 
   5155 	if (sc->sc_type == WM_T_82574)
   5156 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5157 	else
   5158 		mrqc = MRQC_ENABLE_RSS_MQ;
   5159 
   5160 	/*
    5161 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5162 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5163 	 */
   5164 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5165 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5166 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5167 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5168 
   5169 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5170 }
   5171 
   5172 /*
    5173  * Adjust the numbers of TX and RX queues which the system actually uses.
    5174  *
    5175  * The numbers are affected by the parameters below:
    5176  *     - The number of hardware queues
   5177  *     - The number of MSI-X vectors (= "nvectors" argument)
   5178  *     - ncpu
   5179  */
   5180 static void
   5181 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5182 {
   5183 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5184 
   5185 	if (nvectors < 2) {
   5186 		sc->sc_nqueues = 1;
   5187 		return;
   5188 	}
   5189 
   5190 	switch (sc->sc_type) {
   5191 	case WM_T_82572:
   5192 		hw_ntxqueues = 2;
   5193 		hw_nrxqueues = 2;
   5194 		break;
   5195 	case WM_T_82574:
   5196 		hw_ntxqueues = 2;
   5197 		hw_nrxqueues = 2;
   5198 		break;
   5199 	case WM_T_82575:
   5200 		hw_ntxqueues = 4;
   5201 		hw_nrxqueues = 4;
   5202 		break;
   5203 	case WM_T_82576:
   5204 		hw_ntxqueues = 16;
   5205 		hw_nrxqueues = 16;
   5206 		break;
   5207 	case WM_T_82580:
   5208 	case WM_T_I350:
   5209 	case WM_T_I354:
   5210 		hw_ntxqueues = 8;
   5211 		hw_nrxqueues = 8;
   5212 		break;
   5213 	case WM_T_I210:
   5214 		hw_ntxqueues = 4;
   5215 		hw_nrxqueues = 4;
   5216 		break;
   5217 	case WM_T_I211:
   5218 		hw_ntxqueues = 2;
   5219 		hw_nrxqueues = 2;
   5220 		break;
   5221 		/*
    5222 		 * As the Ethernet controllers below do not support MSI-X,
    5223 		 * this driver does not use multiqueue on them.
   5224 		 *     - WM_T_80003
   5225 		 *     - WM_T_ICH8
   5226 		 *     - WM_T_ICH9
   5227 		 *     - WM_T_ICH10
   5228 		 *     - WM_T_PCH
   5229 		 *     - WM_T_PCH2
   5230 		 *     - WM_T_PCH_LPT
   5231 		 */
   5232 	default:
   5233 		hw_ntxqueues = 1;
   5234 		hw_nrxqueues = 1;
   5235 		break;
   5236 	}
   5237 
   5238 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5239 
   5240 	/*
    5241 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5242 	 * we limit the number of queues actually used.
   5243 	 */
   5244 	if (nvectors < hw_nqueues + 1)
   5245 		sc->sc_nqueues = nvectors - 1;
   5246 	else
   5247 		sc->sc_nqueues = hw_nqueues;
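	/*
	 * Example: an 82576 (16 hardware queues) probed with nvectors == 5
	 * ends up with sc_nqueues == 4, since one vector is reserved for
	 * the link interrupt.
	 */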
   5248 
   5249 	/*
    5250 	 * As using more queues than CPUs cannot improve scaling, we limit
    5251 	 * the number of queues actually used.
   5252 	 */
   5253 	if (ncpu < sc->sc_nqueues)
   5254 		sc->sc_nqueues = ncpu;
   5255 }
   5256 
   5257 static inline bool
   5258 wm_is_using_msix(struct wm_softc *sc)
   5259 {
   5260 
   5261 	return (sc->sc_nintrs > 1);
   5262 }
   5263 
   5264 static inline bool
   5265 wm_is_using_multiqueue(struct wm_softc *sc)
   5266 {
   5267 
   5268 	return (sc->sc_nqueues > 1);
   5269 }
   5270 
   5271 static int
   5272 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5273 {
   5274 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5275 	wmq->wmq_id = qidx;
   5276 	wmq->wmq_intr_idx = intr_idx;
   5277 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5278 #ifdef WM_MPSAFE
   5279 	    | SOFTINT_MPSAFE
   5280 #endif
   5281 	    , wm_handle_queue, wmq);
   5282 	if (wmq->wmq_si != NULL)
   5283 		return 0;
   5284 
   5285 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5286 	    wmq->wmq_id);
   5287 
   5288 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5289 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5290 	return ENOMEM;
   5291 }
   5292 
   5293 /*
   5294  * Both single interrupt MSI and INTx can use this function.
   5295  */
   5296 static int
   5297 wm_setup_legacy(struct wm_softc *sc)
   5298 {
   5299 	pci_chipset_tag_t pc = sc->sc_pc;
   5300 	const char *intrstr = NULL;
   5301 	char intrbuf[PCI_INTRSTR_LEN];
   5302 	int error;
   5303 
   5304 	error = wm_alloc_txrx_queues(sc);
   5305 	if (error) {
   5306 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5307 		    error);
   5308 		return ENOMEM;
   5309 	}
   5310 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5311 	    sizeof(intrbuf));
   5312 #ifdef WM_MPSAFE
   5313 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5314 #endif
   5315 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5316 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5317 	if (sc->sc_ihs[0] == NULL) {
   5318 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5319 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5320 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5321 		return ENOMEM;
   5322 	}
   5323 
   5324 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5325 	sc->sc_nintrs = 1;
   5326 
   5327 	return wm_softint_establish(sc, 0, 0);
   5328 }
   5329 
   5330 static int
   5331 wm_setup_msix(struct wm_softc *sc)
   5332 {
   5333 	void *vih;
   5334 	kcpuset_t *affinity;
   5335 	int qidx, error, intr_idx, txrx_established;
   5336 	pci_chipset_tag_t pc = sc->sc_pc;
   5337 	const char *intrstr = NULL;
   5338 	char intrbuf[PCI_INTRSTR_LEN];
   5339 	char intr_xname[INTRDEVNAMEBUF];
   5340 
   5341 	if (sc->sc_nqueues < ncpu) {
   5342 		/*
   5343 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5344 		 * interrupts starts from CPU#1.
   5345 		 */
   5346 		sc->sc_affinity_offset = 1;
   5347 	} else {
   5348 		/*
    5349 		 * In this case, this device uses all CPUs, so we unify the
    5350 		 * affinity cpu_index with the MSI-X vector number for readability.
   5351 		 */
   5352 		sc->sc_affinity_offset = 0;
   5353 	}
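	/*
	 * Example: with 4 queues on an 8-CPU system, the Tx/Rx vectors are
	 * bound to CPU#1..CPU#4 below, leaving CPU#0 free for other
	 * devices' interrupts.
	 */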
   5354 
   5355 	error = wm_alloc_txrx_queues(sc);
   5356 	if (error) {
   5357 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5358 		    error);
   5359 		return ENOMEM;
   5360 	}
   5361 
   5362 	kcpuset_create(&affinity, false);
   5363 	intr_idx = 0;
   5364 
   5365 	/*
   5366 	 * TX and RX
   5367 	 */
   5368 	txrx_established = 0;
   5369 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5370 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5371 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5372 
   5373 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5374 		    sizeof(intrbuf));
   5375 #ifdef WM_MPSAFE
   5376 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5377 		    PCI_INTR_MPSAFE, true);
   5378 #endif
   5379 		memset(intr_xname, 0, sizeof(intr_xname));
   5380 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5381 		    device_xname(sc->sc_dev), qidx);
   5382 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5383 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5384 		if (vih == NULL) {
   5385 			aprint_error_dev(sc->sc_dev,
   5386 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5387 			    intrstr ? " at " : "",
   5388 			    intrstr ? intrstr : "");
   5389 
   5390 			goto fail;
   5391 		}
   5392 		kcpuset_zero(affinity);
   5393 		/* Round-robin affinity */
   5394 		kcpuset_set(affinity, affinity_to);
   5395 		error = interrupt_distribute(vih, affinity, NULL);
   5396 		if (error == 0) {
   5397 			aprint_normal_dev(sc->sc_dev,
   5398 			    "for TX and RX interrupting at %s affinity to %u\n",
   5399 			    intrstr, affinity_to);
   5400 		} else {
   5401 			aprint_normal_dev(sc->sc_dev,
   5402 			    "for TX and RX interrupting at %s\n", intrstr);
   5403 		}
   5404 		sc->sc_ihs[intr_idx] = vih;
   5405 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5406 			goto fail;
   5407 		txrx_established++;
   5408 		intr_idx++;
   5409 	}
   5410 
   5411 	/*
   5412 	 * LINK
   5413 	 */
   5414 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5415 	    sizeof(intrbuf));
   5416 #ifdef WM_MPSAFE
   5417 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5418 #endif
   5419 	memset(intr_xname, 0, sizeof(intr_xname));
   5420 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5421 	    device_xname(sc->sc_dev));
   5422 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5423 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5424 	if (vih == NULL) {
   5425 		aprint_error_dev(sc->sc_dev,
   5426 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5427 		    intrstr ? " at " : "",
   5428 		    intrstr ? intrstr : "");
   5429 
   5430 		goto fail;
   5431 	}
   5432 	/* keep default affinity to LINK interrupt */
   5433 	aprint_normal_dev(sc->sc_dev,
   5434 	    "for LINK interrupting at %s\n", intrstr);
   5435 	sc->sc_ihs[intr_idx] = vih;
   5436 	sc->sc_link_intr_idx = intr_idx;
   5437 
   5438 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5439 	kcpuset_destroy(affinity);
   5440 	return 0;
   5441 
   5442  fail:
   5443 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5444 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5445 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5446 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5447 	}
   5448 
   5449 	kcpuset_destroy(affinity);
   5450 	return ENOMEM;
   5451 }
   5452 
   5453 static void
   5454 wm_unset_stopping_flags(struct wm_softc *sc)
   5455 {
   5456 	int i;
   5457 
   5458 	KASSERT(WM_CORE_LOCKED(sc));
   5459 
   5460 	/*
   5461 	 * must unset stopping flags in ascending order.
   5462 	 */
   5463 	for (i = 0; i < sc->sc_nqueues; i++) {
   5464 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5465 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5466 
   5467 		mutex_enter(txq->txq_lock);
   5468 		txq->txq_stopping = false;
   5469 		mutex_exit(txq->txq_lock);
   5470 
   5471 		mutex_enter(rxq->rxq_lock);
   5472 		rxq->rxq_stopping = false;
   5473 		mutex_exit(rxq->rxq_lock);
   5474 	}
   5475 
   5476 	sc->sc_core_stopping = false;
   5477 }
   5478 
   5479 static void
   5480 wm_set_stopping_flags(struct wm_softc *sc)
   5481 {
   5482 	int i;
   5483 
   5484 	KASSERT(WM_CORE_LOCKED(sc));
   5485 
   5486 	sc->sc_core_stopping = true;
   5487 
   5488 	/*
   5489 	 * must set stopping flags in ascending order.
   5490 	 */
   5491 	for (i = 0; i < sc->sc_nqueues; i++) {
   5492 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5493 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5494 
   5495 		mutex_enter(rxq->rxq_lock);
   5496 		rxq->rxq_stopping = true;
   5497 		mutex_exit(rxq->rxq_lock);
   5498 
   5499 		mutex_enter(txq->txq_lock);
   5500 		txq->txq_stopping = true;
   5501 		mutex_exit(txq->txq_lock);
   5502 	}
   5503 }
   5504 
   5505 /*
    5506  * Write the interrupt interval value to ITR or EITR.
   5507  */
   5508 static void
   5509 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5510 {
   5511 
   5512 	if (!wmq->wmq_set_itr)
   5513 		return;
   5514 
   5515 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5516 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5517 
   5518 		/*
   5519 		 * 82575 doesn't have CNT_INGR field.
    5520 		 * So, overwrite the counter field in software.
   5521 		 */
   5522 		if (sc->sc_type == WM_T_82575)
   5523 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5524 		else
   5525 			eitr |= EITR_CNT_INGR;
   5526 
   5527 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5528 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5529 		/*
    5530 		 * 82574 has both ITR and EITR. Set EITR when we use
    5531 		 * the multiqueue function with MSI-X.
   5532 		 */
   5533 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5534 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5535 	} else {
   5536 		KASSERT(wmq->wmq_id == 0);
   5537 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5538 	}
   5539 
   5540 	wmq->wmq_set_itr = false;
   5541 }
   5542 
   5543 /*
   5544  * TODO
    5545  * The dynamic ITR calculation below is almost the same as Linux igb,
    5546  * however it does not fit wm(4). So, AIM is kept disabled until we
    5547  * find an appropriate ITR calculation for wm(4).
   5548  */
   5549 /*
    5550  * Calculate the interrupt interval value to be written to the register
    5551  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5552  */
   5553 static void
   5554 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5555 {
   5556 #ifdef NOTYET
   5557 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5558 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5559 	uint32_t avg_size = 0;
   5560 	uint32_t new_itr;
   5561 
   5562 	if (rxq->rxq_packets)
   5563 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5564 	if (txq->txq_packets)
   5565 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5566 
   5567 	if (avg_size == 0) {
   5568 		new_itr = 450; /* restore default value */
   5569 		goto out;
   5570 	}
   5571 
   5572 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5573 	avg_size += 24;
   5574 
   5575 	/* Don't starve jumbo frames */
   5576 	avg_size = uimin(avg_size, 3000);
   5577 
   5578 	/* Give a little boost to mid-size frames */
   5579 	if ((avg_size > 300) && (avg_size < 1200))
   5580 		new_itr = avg_size / 3;
   5581 	else
   5582 		new_itr = avg_size / 2;
   5583 
   5584 out:
   5585 	/*
    5586 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5587 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5588 	 */
   5589 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5590 		new_itr *= 4;
   5591 
   5592 	if (new_itr != wmq->wmq_itr) {
   5593 		wmq->wmq_itr = new_itr;
   5594 		wmq->wmq_set_itr = true;
   5595 	} else
   5596 		wmq->wmq_set_itr = false;
   5597 
   5598 	rxq->rxq_packets = 0;
   5599 	rxq->rxq_bytes = 0;
   5600 	txq->txq_packets = 0;
   5601 	txq->txq_bytes = 0;
   5602 #endif
   5603 }
   5604 
   5605 /*
   5606  * wm_init:		[ifnet interface function]
   5607  *
   5608  *	Initialize the interface.
   5609  */
   5610 static int
   5611 wm_init(struct ifnet *ifp)
   5612 {
   5613 	struct wm_softc *sc = ifp->if_softc;
   5614 	int ret;
   5615 
   5616 	WM_CORE_LOCK(sc);
   5617 	ret = wm_init_locked(ifp);
   5618 	WM_CORE_UNLOCK(sc);
   5619 
   5620 	return ret;
   5621 }
   5622 
   5623 static int
   5624 wm_init_locked(struct ifnet *ifp)
   5625 {
   5626 	struct wm_softc *sc = ifp->if_softc;
   5627 	int i, j, trynum, error = 0;
   5628 	uint32_t reg;
   5629 
   5630 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5631 		device_xname(sc->sc_dev), __func__));
   5632 	KASSERT(WM_CORE_LOCKED(sc));
   5633 
   5634 	/*
    5635 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5636 	 * There is a small but measurable benefit to avoiding the adjustment
   5637 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5638 	 * on such platforms.  One possibility is that the DMA itself is
   5639 	 * slightly more efficient if the front of the entire packet (instead
   5640 	 * of the front of the headers) is aligned.
   5641 	 *
   5642 	 * Note we must always set align_tweak to 0 if we are using
   5643 	 * jumbo frames.
   5644 	 */
   5645 #ifdef __NO_STRICT_ALIGNMENT
   5646 	sc->sc_align_tweak = 0;
   5647 #else
   5648 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5649 		sc->sc_align_tweak = 0;
   5650 	else
   5651 		sc->sc_align_tweak = 2;
   5652 #endif /* __NO_STRICT_ALIGNMENT */
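	/*
	 * With a normal MTU the frame fits in a single cluster, so the
	 * 2-byte tweak offsets the 14-byte Ethernet header and leaves the
	 * IP header that follows it 4-byte aligned.
	 */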
   5653 
   5654 	/* Cancel any pending I/O. */
   5655 	wm_stop_locked(ifp, 0);
   5656 
   5657 	/* update statistics before reset */
   5658 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5659 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5660 
   5661 	/* PCH_SPT hardware workaround */
   5662 	if (sc->sc_type == WM_T_PCH_SPT)
   5663 		wm_flush_desc_rings(sc);
   5664 
   5665 	/* Reset the chip to a known state. */
   5666 	wm_reset(sc);
   5667 
   5668 	/*
   5669 	 * AMT based hardware can now take control from firmware
   5670 	 * Do this after reset.
   5671 	 */
   5672 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5673 		wm_get_hw_control(sc);
   5674 
   5675 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5676 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5677 		wm_legacy_irq_quirk_spt(sc);
   5678 
   5679 	/* Init hardware bits */
   5680 	wm_initialize_hardware_bits(sc);
   5681 
   5682 	/* Reset the PHY. */
   5683 	if (sc->sc_flags & WM_F_HAS_MII)
   5684 		wm_gmii_reset(sc);
   5685 
   5686 	if (sc->sc_type >= WM_T_ICH8) {
   5687 		reg = CSR_READ(sc, WMREG_GCR);
   5688 		/*
   5689 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5690 		 * default after reset.
   5691 		 */
   5692 		if (sc->sc_type == WM_T_ICH8)
   5693 			reg |= GCR_NO_SNOOP_ALL;
   5694 		else
   5695 			reg &= ~GCR_NO_SNOOP_ALL;
   5696 		CSR_WRITE(sc, WMREG_GCR, reg);
   5697 	}
   5698 	if ((sc->sc_type >= WM_T_ICH8)
   5699 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5700 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5701 
   5702 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5703 		reg |= CTRL_EXT_RO_DIS;
   5704 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5705 	}
   5706 
   5707 	/* Calculate (E)ITR value */
   5708 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5709 		/*
   5710 		 * For NEWQUEUE's EITR (except for 82575).
    5711 		 * 82575's EITR should be set to the same throttling value
    5712 		 * as other old controllers' ITR because the interrupt/sec
    5713 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5714 		 *
    5715 		 * 82574's EITR should be set to the same throttling value as ITR.
    5716 		 *
    5717 		 * For N interrupts/sec, set this value to:
    5718 		 * 1,000,000 / N in contrast to the ITR throttling value.
   5719 		 */
   5720 		sc->sc_itr_init = 450;
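		/* 450 gives about 2222 (= 1,000,000 / 450) interrupts/sec. */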
   5721 	} else if (sc->sc_type >= WM_T_82543) {
   5722 		/*
   5723 		 * Set up the interrupt throttling register (units of 256ns)
   5724 		 * Note that a footnote in Intel's documentation says this
   5725 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5726 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5727 		 * that that is also true for the 1024ns units of the other
   5728 		 * interrupt-related timer registers -- so, really, we ought
   5729 		 * to divide this value by 4 when the link speed is low.
   5730 		 *
   5731 		 * XXX implement this division at link speed change!
   5732 		 */
   5733 
   5734 		/*
   5735 		 * For N interrupts/sec, set this value to:
   5736 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5737 		 * absolute and packet timer values to this value
   5738 		 * divided by 4 to get "simple timer" behavior.
   5739 		 */
   5740 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5741 	}
   5742 
   5743 	error = wm_init_txrx_queues(sc);
   5744 	if (error)
   5745 		goto out;
   5746 
   5747 	/*
   5748 	 * Clear out the VLAN table -- we don't use it (yet).
   5749 	 */
   5750 	CSR_WRITE(sc, WMREG_VET, 0);
   5751 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5752 		trynum = 10; /* Due to hw errata */
   5753 	else
   5754 		trynum = 1;
   5755 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5756 		for (j = 0; j < trynum; j++)
   5757 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5758 
   5759 	/*
   5760 	 * Set up flow-control parameters.
   5761 	 *
   5762 	 * XXX Values could probably stand some tuning.
   5763 	 */
   5764 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5765 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5766 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5767 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5768 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5769 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5770 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5771 	}
   5772 
   5773 	sc->sc_fcrtl = FCRTL_DFLT;
   5774 	if (sc->sc_type < WM_T_82543) {
   5775 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5776 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5777 	} else {
   5778 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5779 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5780 	}
   5781 
   5782 	if (sc->sc_type == WM_T_80003)
   5783 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5784 	else
   5785 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5786 
   5787 	/* Writes the control register. */
   5788 	wm_set_vlan(sc);
   5789 
   5790 	if (sc->sc_flags & WM_F_HAS_MII) {
   5791 		uint16_t kmreg;
   5792 
   5793 		switch (sc->sc_type) {
   5794 		case WM_T_80003:
   5795 		case WM_T_ICH8:
   5796 		case WM_T_ICH9:
   5797 		case WM_T_ICH10:
   5798 		case WM_T_PCH:
   5799 		case WM_T_PCH2:
   5800 		case WM_T_PCH_LPT:
   5801 		case WM_T_PCH_SPT:
   5802 		case WM_T_PCH_CNP:
   5803 			/*
   5804 			 * Set the mac to wait the maximum time between each
   5805 			 * iteration and increase the max iterations when
   5806 			 * polling the phy; this fixes erroneous timeouts at
   5807 			 * 10Mbps.
   5808 			 */
   5809 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5810 			    0xFFFF);
   5811 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5812 			    &kmreg);
   5813 			kmreg |= 0x3F;
   5814 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5815 			    kmreg);
   5816 			break;
   5817 		default:
   5818 			break;
   5819 		}
   5820 
   5821 		if (sc->sc_type == WM_T_80003) {
   5822 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5823 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5824 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5825 
   5826 			/* Bypass RX and TX FIFO's */
   5827 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5828 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5829 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5830 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5831 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5832 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5833 		}
   5834 	}
   5835 #if 0
   5836 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5837 #endif
   5838 
   5839 	/* Set up checksum offload parameters. */
   5840 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5841 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5842 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5843 		reg |= RXCSUM_IPOFL;
   5844 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5845 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5846 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5847 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5848 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5849 
   5850 	/* Set registers about MSI-X */
   5851 	if (wm_is_using_msix(sc)) {
   5852 		uint32_t ivar;
   5853 		struct wm_queue *wmq;
   5854 		int qid, qintr_idx;
   5855 
   5856 		if (sc->sc_type == WM_T_82575) {
   5857 			/* Interrupt control */
   5858 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5859 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5860 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5861 
   5862 			/* TX and RX */
   5863 			for (i = 0; i < sc->sc_nqueues; i++) {
   5864 				wmq = &sc->sc_queue[i];
   5865 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5866 				    EITR_TX_QUEUE(wmq->wmq_id)
   5867 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5868 			}
   5869 			/* Link status */
   5870 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5871 			    EITR_OTHER);
   5872 		} else if (sc->sc_type == WM_T_82574) {
   5873 			/* Interrupt control */
   5874 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5875 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5876 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5877 
   5878 			/*
    5879 			 * Work around an issue with spurious interrupts in
    5880 			 * MSI-X mode.
    5881 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    5882 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5883 			 */
   5884 			reg = CSR_READ(sc, WMREG_RFCTL);
   5885 			reg |= WMREG_RFCTL_ACKDIS;
   5886 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5887 
   5888 			ivar = 0;
   5889 			/* TX and RX */
   5890 			for (i = 0; i < sc->sc_nqueues; i++) {
   5891 				wmq = &sc->sc_queue[i];
   5892 				qid = wmq->wmq_id;
   5893 				qintr_idx = wmq->wmq_intr_idx;
   5894 
   5895 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5896 				    IVAR_TX_MASK_Q_82574(qid));
   5897 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5898 				    IVAR_RX_MASK_Q_82574(qid));
   5899 			}
   5900 			/* Link status */
   5901 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5902 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5903 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5904 		} else {
   5905 			/* Interrupt control */
   5906 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5907 			    | GPIE_EIAME | GPIE_PBA);
   5908 
   5909 			switch (sc->sc_type) {
   5910 			case WM_T_82580:
   5911 			case WM_T_I350:
   5912 			case WM_T_I354:
   5913 			case WM_T_I210:
   5914 			case WM_T_I211:
   5915 				/* TX and RX */
   5916 				for (i = 0; i < sc->sc_nqueues; i++) {
   5917 					wmq = &sc->sc_queue[i];
   5918 					qid = wmq->wmq_id;
   5919 					qintr_idx = wmq->wmq_intr_idx;
   5920 
   5921 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5922 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5923 					ivar |= __SHIFTIN((qintr_idx
   5924 						| IVAR_VALID),
   5925 					    IVAR_TX_MASK_Q(qid));
   5926 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5927 					ivar |= __SHIFTIN((qintr_idx
   5928 						| IVAR_VALID),
   5929 					    IVAR_RX_MASK_Q(qid));
   5930 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5931 				}
   5932 				break;
   5933 			case WM_T_82576:
   5934 				/* TX and RX */
   5935 				for (i = 0; i < sc->sc_nqueues; i++) {
   5936 					wmq = &sc->sc_queue[i];
   5937 					qid = wmq->wmq_id;
   5938 					qintr_idx = wmq->wmq_intr_idx;
   5939 
   5940 					ivar = CSR_READ(sc,
   5941 					    WMREG_IVAR_Q_82576(qid));
   5942 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5943 					ivar |= __SHIFTIN((qintr_idx
   5944 						| IVAR_VALID),
   5945 					    IVAR_TX_MASK_Q_82576(qid));
   5946 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5947 					ivar |= __SHIFTIN((qintr_idx
   5948 						| IVAR_VALID),
   5949 					    IVAR_RX_MASK_Q_82576(qid));
   5950 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5951 					    ivar);
   5952 				}
   5953 				break;
   5954 			default:
   5955 				break;
   5956 			}
   5957 
   5958 			/* Link status */
   5959 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5960 			    IVAR_MISC_OTHER);
   5961 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5962 		}
   5963 
   5964 		if (wm_is_using_multiqueue(sc)) {
   5965 			wm_init_rss(sc);
   5966 
    5967 			/*
    5968 			 * NOTE: Receive Full-Packet Checksum Offload
    5969 			 * is mutually exclusive with Multiqueue. However,
    5970 			 * this is not the same as TCP/IP checksums, which
    5971 			 * still work.
    5972 			 */
   5973 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5974 			reg |= RXCSUM_PCSD;
   5975 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5976 		}
   5977 	}
   5978 
   5979 	/* Set up the interrupt registers. */
   5980 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5981 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5982 	    ICR_RXO | ICR_RXT0;
   5983 	if (wm_is_using_msix(sc)) {
   5984 		uint32_t mask;
   5985 		struct wm_queue *wmq;
   5986 
   5987 		switch (sc->sc_type) {
   5988 		case WM_T_82574:
   5989 			mask = 0;
   5990 			for (i = 0; i < sc->sc_nqueues; i++) {
   5991 				wmq = &sc->sc_queue[i];
   5992 				mask |= ICR_TXQ(wmq->wmq_id);
   5993 				mask |= ICR_RXQ(wmq->wmq_id);
   5994 			}
   5995 			mask |= ICR_OTHER;
   5996 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5997 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5998 			break;
   5999 		default:
   6000 			if (sc->sc_type == WM_T_82575) {
   6001 				mask = 0;
   6002 				for (i = 0; i < sc->sc_nqueues; i++) {
   6003 					wmq = &sc->sc_queue[i];
   6004 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6005 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6006 				}
   6007 				mask |= EITR_OTHER;
   6008 			} else {
   6009 				mask = 0;
   6010 				for (i = 0; i < sc->sc_nqueues; i++) {
   6011 					wmq = &sc->sc_queue[i];
   6012 					mask |= 1 << wmq->wmq_intr_idx;
   6013 				}
   6014 				mask |= 1 << sc->sc_link_intr_idx;
   6015 			}
   6016 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6017 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6018 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6019 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6020 			break;
   6021 		}
   6022 	} else
   6023 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6024 
   6025 	/* Set up the inter-packet gap. */
   6026 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6027 
   6028 	if (sc->sc_type >= WM_T_82543) {
   6029 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6030 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6031 			wm_itrs_writereg(sc, wmq);
   6032 		}
   6033 		/*
    6034 		 * Link interrupts occur much less often than TX and RX
    6035 		 * interrupts, so we don't tune the
    6036 		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's
    6037 		 * if_igb does.
   6038 		 */
   6039 	}
   6040 
   6041 	/* Set the VLAN ethernetype. */
   6042 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6043 
   6044 	/*
   6045 	 * Set up the transmit control register; we start out with
    6046 	 * a collision distance suitable for FDX, but update it when
   6047 	 * we resolve the media type.
   6048 	 */
   6049 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6050 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6051 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
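	/* Enable Multiple Request Support (MULR) on 82571 and newer. */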
   6052 	if (sc->sc_type >= WM_T_82571)
   6053 		sc->sc_tctl |= TCTL_MULR;
   6054 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6055 
   6056 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6057 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6058 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6059 	}
   6060 
   6061 	if (sc->sc_type == WM_T_80003) {
   6062 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6063 		reg &= ~TCTL_EXT_GCEX_MASK;
   6064 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6065 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6066 	}
   6067 
   6068 	/* Set the media. */
   6069 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6070 		goto out;
   6071 
   6072 	/* Configure for OS presence */
   6073 	wm_init_manageability(sc);
   6074 
   6075 	/*
   6076 	 * Set up the receive control register; we actually program the
   6077 	 * register when we set the receive filter. Use multicast address
   6078 	 * offset type 0.
   6079 	 *
   6080 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6081 	 * don't enable that feature.
   6082 	 */
   6083 	sc->sc_mchash_type = 0;
   6084 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6085 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6086 
   6087 	/*
    6088 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6089 	 */
   6090 	if (sc->sc_type == WM_T_82574)
   6091 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6092 
   6093 	/*
   6094 	 * The I350 has a bug where it always strips the CRC whether
   6095 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   6096 	 */
   6097 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6098 	    || (sc->sc_type == WM_T_I210))
   6099 		sc->sc_rctl |= RCTL_SECRC;
   6100 
   6101 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6102 	    && (ifp->if_mtu > ETHERMTU)) {
   6103 		sc->sc_rctl |= RCTL_LPE;
   6104 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6105 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6106 	}
   6107 
   6108 	if (MCLBYTES == 2048)
   6109 		sc->sc_rctl |= RCTL_2k;
   6110 	else {
   6111 		if (sc->sc_type >= WM_T_82543) {
   6112 			switch (MCLBYTES) {
   6113 			case 4096:
   6114 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6115 				break;
   6116 			case 8192:
   6117 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6118 				break;
   6119 			case 16384:
   6120 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6121 				break;
   6122 			default:
   6123 				panic("wm_init: MCLBYTES %d unsupported",
   6124 				    MCLBYTES);
   6125 				break;
   6126 			}
   6127 		} else
   6128 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6129 	}
   6130 
   6131 	/* Enable ECC */
   6132 	switch (sc->sc_type) {
   6133 	case WM_T_82571:
   6134 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6135 		reg |= PBA_ECC_CORR_EN;
   6136 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6137 		break;
   6138 	case WM_T_PCH_LPT:
   6139 	case WM_T_PCH_SPT:
   6140 	case WM_T_PCH_CNP:
   6141 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6142 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6143 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6144 
   6145 		sc->sc_ctrl |= CTRL_MEHE;
   6146 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6147 		break;
   6148 	default:
   6149 		break;
   6150 	}
   6151 
   6152 	/*
   6153 	 * Set the receive filter.
   6154 	 *
   6155 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6156 	 * the setting of RCTL.EN in wm_set_filter()
   6157 	 */
   6158 	wm_set_filter(sc);
   6159 
   6160 	/* On 575 and later set RDT only if RX enabled */
   6161 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6162 		int qidx;
   6163 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6164 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6165 			for (i = 0; i < WM_NRXDESC; i++) {
   6166 				mutex_enter(rxq->rxq_lock);
   6167 				wm_init_rxdesc(rxq, i);
   6168 				mutex_exit(rxq->rxq_lock);
   6169 
   6170 			}
   6171 		}
   6172 	}
   6173 
   6174 	wm_unset_stopping_flags(sc);
   6175 
   6176 	/* Start the one second link check clock. */
   6177 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6178 
   6179 	/* ...all done! */
   6180 	ifp->if_flags |= IFF_RUNNING;
   6181 	ifp->if_flags &= ~IFF_OACTIVE;
   6182 
   6183  out:
   6184 	sc->sc_if_flags = ifp->if_flags;
   6185 	if (error)
   6186 		log(LOG_ERR, "%s: interface not running\n",
   6187 		    device_xname(sc->sc_dev));
   6188 	return error;
   6189 }
   6190 
   6191 /*
   6192  * wm_stop:		[ifnet interface function]
   6193  *
   6194  *	Stop transmission on the interface.
   6195  */
   6196 static void
   6197 wm_stop(struct ifnet *ifp, int disable)
   6198 {
   6199 	struct wm_softc *sc = ifp->if_softc;
   6200 
   6201 	WM_CORE_LOCK(sc);
   6202 	wm_stop_locked(ifp, disable);
   6203 	WM_CORE_UNLOCK(sc);
   6204 }
   6205 
   6206 static void
   6207 wm_stop_locked(struct ifnet *ifp, int disable)
   6208 {
   6209 	struct wm_softc *sc = ifp->if_softc;
   6210 	struct wm_txsoft *txs;
   6211 	int i, qidx;
   6212 
   6213 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6214 		device_xname(sc->sc_dev), __func__));
   6215 	KASSERT(WM_CORE_LOCKED(sc));
   6216 
   6217 	wm_set_stopping_flags(sc);
   6218 
   6219 	/* Stop the one second clock. */
   6220 	callout_stop(&sc->sc_tick_ch);
   6221 
   6222 	/* Stop the 82547 Tx FIFO stall check timer. */
   6223 	if (sc->sc_type == WM_T_82547)
   6224 		callout_stop(&sc->sc_txfifo_ch);
   6225 
   6226 	if (sc->sc_flags & WM_F_HAS_MII) {
   6227 		/* Down the MII. */
   6228 		mii_down(&sc->sc_mii);
   6229 	} else {
   6230 #if 0
   6231 		/* Should we clear PHY's status properly? */
   6232 		wm_reset(sc);
   6233 #endif
   6234 	}
   6235 
   6236 	/* Stop the transmit and receive processes. */
   6237 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6238 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6239 	sc->sc_rctl &= ~RCTL_EN;
   6240 
   6241 	/*
   6242 	 * Clear the interrupt mask to ensure the device cannot assert its
   6243 	 * interrupt line.
   6244 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6245 	 * service any currently pending or shared interrupt.
   6246 	 */
   6247 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6248 	sc->sc_icr = 0;
   6249 	if (wm_is_using_msix(sc)) {
   6250 		if (sc->sc_type != WM_T_82574) {
   6251 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6252 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6253 		} else
   6254 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6255 	}
   6256 
   6257 	/* Release any queued transmit buffers. */
   6258 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6259 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6260 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6261 		mutex_enter(txq->txq_lock);
   6262 		txq->txq_sending = false; /* ensure watchdog disabled */
   6263 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6264 			txs = &txq->txq_soft[i];
   6265 			if (txs->txs_mbuf != NULL) {
    6266 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6267 				m_freem(txs->txs_mbuf);
   6268 				txs->txs_mbuf = NULL;
   6269 			}
   6270 		}
   6271 		mutex_exit(txq->txq_lock);
   6272 	}
   6273 
   6274 	/* Mark the interface as down and cancel the watchdog timer. */
   6275 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6276 
   6277 	if (disable) {
   6278 		for (i = 0; i < sc->sc_nqueues; i++) {
   6279 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6280 			mutex_enter(rxq->rxq_lock);
   6281 			wm_rxdrain(rxq);
   6282 			mutex_exit(rxq->rxq_lock);
   6283 		}
   6284 	}
   6285 
   6286 #if 0 /* notyet */
   6287 	if (sc->sc_type >= WM_T_82544)
   6288 		CSR_WRITE(sc, WMREG_WUC, 0);
   6289 #endif
   6290 }
   6291 
   6292 static void
   6293 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6294 {
   6295 	struct mbuf *m;
   6296 	int i;
   6297 
   6298 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6299 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6300 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6301 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6302 		    m->m_data, m->m_len, m->m_flags);
   6303 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6304 	    i, i == 1 ? "" : "s");
   6305 }
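
/*
 * Example of the output produced above (illustrative only; the device
 * name, addresses and lengths are made up):
 *
 *	wm0: mbuf chain:
 *	wm0:	m_data = 0xffff800012345000, m_len = 54, m_flags = 0x00000003
 *	wm0:	m_data = 0xffff800012346000, m_len = 1448, m_flags = 0x00000001
 *	wm0:	2 mbufs in chain
 */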
   6306 
   6307 /*
   6308  * wm_82547_txfifo_stall:
   6309  *
   6310  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6311  *	reset the FIFO pointers, and restart packet transmission.
   6312  */
   6313 static void
   6314 wm_82547_txfifo_stall(void *arg)
   6315 {
   6316 	struct wm_softc *sc = arg;
   6317 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6318 
   6319 	mutex_enter(txq->txq_lock);
   6320 
   6321 	if (txq->txq_stopping)
   6322 		goto out;
   6323 
   6324 	if (txq->txq_fifo_stall) {
   6325 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6326 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6327 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6328 			/*
   6329 			 * Packets have drained.  Stop transmitter, reset
   6330 			 * FIFO pointers, restart transmitter, and kick
   6331 			 * the packet queue.
   6332 			 */
   6333 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6334 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6335 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6336 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6337 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6338 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6339 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6340 			CSR_WRITE_FLUSH(sc);
   6341 
   6342 			txq->txq_fifo_head = 0;
   6343 			txq->txq_fifo_stall = 0;
   6344 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6345 		} else {
   6346 			/*
   6347 			 * Still waiting for packets to drain; try again in
   6348 			 * another tick.
   6349 			 */
   6350 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6351 		}
   6352 	}
   6353 
   6354 out:
   6355 	mutex_exit(txq->txq_lock);
   6356 }
   6357 
   6358 /*
   6359  * wm_82547_txfifo_bugchk:
   6360  *
   6361  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6362  *	prevent enqueueing a packet that would wrap around the end
    6363  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6364  *
   6365  *	We do this by checking the amount of space before the end
   6366  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6367  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6368  *	the internal FIFO pointers to the beginning, and restart
   6369  *	transmission on the interface.
   6370  */
   6371 #define	WM_FIFO_HDR		0x10
   6372 #define	WM_82547_PAD_LEN	0x3e0
   6373 static int
   6374 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6375 {
   6376 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6377 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6378 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6379 
   6380 	/* Just return if already stalled. */
   6381 	if (txq->txq_fifo_stall)
   6382 		return 1;
   6383 
   6384 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6385 		/* Stall only occurs in half-duplex mode. */
   6386 		goto send_packet;
   6387 	}
   6388 
   6389 	if (len >= WM_82547_PAD_LEN + space) {
   6390 		txq->txq_fifo_stall = 1;
   6391 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6392 		return 1;
   6393 	}
   6394 
   6395  send_packet:
   6396 	txq->txq_fifo_head += len;
   6397 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6398 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6399 
   6400 	return 0;
   6401 }
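
/*
 * Worked example for the check above (illustrative numbers): assume a
 * hypothetical txq_fifo_size of 0x2000 and txq_fifo_head of 0x1f00, so
 * space = 0x100 bytes.  A 256-byte packet rounds up to
 * roundup(256 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x110 bytes; since
 * 0x110 < WM_82547_PAD_LEN + 0x100 (= 0x4e0), the packet is sent and
 * txq_fifo_head wraps to 0x10.  Only a packet that would overrun the
 * remaining space by at least WM_82547_PAD_LEN takes the stall path.
 */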
   6402 
   6403 static int
   6404 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6405 {
   6406 	int error;
   6407 
   6408 	/*
   6409 	 * Allocate the control data structures, and create and load the
   6410 	 * DMA map for it.
   6411 	 *
   6412 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6413 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6414 	 * both sets within the same 4G segment.
   6415 	 */
   6416 	if (sc->sc_type < WM_T_82544)
   6417 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6418 	else
   6419 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6420 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6421 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6422 	else
   6423 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6424 
   6425 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6426 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6427 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6428 		aprint_error_dev(sc->sc_dev,
   6429 		    "unable to allocate TX control data, error = %d\n",
   6430 		    error);
   6431 		goto fail_0;
   6432 	}
   6433 
   6434 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6435 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6436 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6437 		aprint_error_dev(sc->sc_dev,
   6438 		    "unable to map TX control data, error = %d\n", error);
   6439 		goto fail_1;
   6440 	}
   6441 
   6442 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6443 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6444 		aprint_error_dev(sc->sc_dev,
   6445 		    "unable to create TX control data DMA map, error = %d\n",
   6446 		    error);
   6447 		goto fail_2;
   6448 	}
   6449 
   6450 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6451 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6452 		aprint_error_dev(sc->sc_dev,
   6453 		    "unable to load TX control data DMA map, error = %d\n",
   6454 		    error);
   6455 		goto fail_3;
   6456 	}
   6457 
   6458 	return 0;
   6459 
   6460  fail_3:
   6461 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6462  fail_2:
   6463 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6464 	    WM_TXDESCS_SIZE(txq));
   6465  fail_1:
   6466 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6467  fail_0:
   6468 	return error;
   6469 }
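
/*
 * Note on the 4G constraint (a sketch of the reasoning, assuming
 * WM_CDTXADDR() yields the bus address of a descriptor): the ring base
 * is programmed into the chip as two 32-bit halves, so the whole ring
 * must not cross a 4GB boundary; the bus_dmamem_alloc() call above
 * guarantees this by passing (bus_size_t)0x100000000ULL as the boundary
 * argument.  In effect:
 *
 *	base  = WM_CDTXADDR(txq, 0);	64-bit bus address of desc 0
 *	TDBAL = base & 0xffffffff;	low half
 *	TDBAH = base >> 32;		high half (constant for the ring)
 */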
   6470 
   6471 static void
   6472 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6473 {
   6474 
   6475 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6476 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6477 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6478 	    WM_TXDESCS_SIZE(txq));
   6479 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6480 }
   6481 
   6482 static int
   6483 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6484 {
   6485 	int error;
   6486 	size_t rxq_descs_size;
   6487 
   6488 	/*
   6489 	 * Allocate the control data structures, and create and load the
   6490 	 * DMA map for it.
   6491 	 *
   6492 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6493 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6494 	 * both sets within the same 4G segment.
   6495 	 */
   6496 	rxq->rxq_ndesc = WM_NRXDESC;
   6497 	if (sc->sc_type == WM_T_82574)
   6498 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6499 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6500 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6501 	else
   6502 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6503 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6504 
   6505 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6506 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6507 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6508 		aprint_error_dev(sc->sc_dev,
   6509 		    "unable to allocate RX control data, error = %d\n",
   6510 		    error);
   6511 		goto fail_0;
   6512 	}
   6513 
   6514 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6515 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6516 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6517 		aprint_error_dev(sc->sc_dev,
   6518 		    "unable to map RX control data, error = %d\n", error);
   6519 		goto fail_1;
   6520 	}
   6521 
   6522 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6523 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6524 		aprint_error_dev(sc->sc_dev,
   6525 		    "unable to create RX control data DMA map, error = %d\n",
   6526 		    error);
   6527 		goto fail_2;
   6528 	}
   6529 
   6530 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6531 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6532 		aprint_error_dev(sc->sc_dev,
   6533 		    "unable to load RX control data DMA map, error = %d\n",
   6534 		    error);
   6535 		goto fail_3;
   6536 	}
   6537 
   6538 	return 0;
   6539 
   6540  fail_3:
   6541 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6542  fail_2:
   6543 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6544 	    rxq_descs_size);
   6545  fail_1:
   6546 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6547  fail_0:
   6548 	return error;
   6549 }
   6550 
   6551 static void
   6552 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6553 {
   6554 
   6555 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6556 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6557 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6558 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6559 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6560 }
   6561 
   6562 
   6563 static int
   6564 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6565 {
   6566 	int i, error;
   6567 
   6568 	/* Create the transmit buffer DMA maps. */
   6569 	WM_TXQUEUELEN(txq) =
   6570 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6571 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6572 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6573 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6574 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6575 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6576 			aprint_error_dev(sc->sc_dev,
   6577 			    "unable to create Tx DMA map %d, error = %d\n",
   6578 			    i, error);
   6579 			goto fail;
   6580 		}
   6581 	}
   6582 
   6583 	return 0;
   6584 
   6585  fail:
   6586 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6587 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6588 			bus_dmamap_destroy(sc->sc_dmat,
   6589 			    txq->txq_soft[i].txs_dmamap);
   6590 	}
   6591 	return error;
   6592 }
   6593 
   6594 static void
   6595 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6596 {
   6597 	int i;
   6598 
   6599 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6600 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6601 			bus_dmamap_destroy(sc->sc_dmat,
   6602 			    txq->txq_soft[i].txs_dmamap);
   6603 	}
   6604 }
   6605 
   6606 static int
   6607 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6608 {
   6609 	int i, error;
   6610 
   6611 	/* Create the receive buffer DMA maps. */
   6612 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6613 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6614 			    MCLBYTES, 0, 0,
   6615 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6616 			aprint_error_dev(sc->sc_dev,
   6617 			    "unable to create Rx DMA map %d error = %d\n",
   6618 			    i, error);
   6619 			goto fail;
   6620 		}
   6621 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6622 	}
   6623 
   6624 	return 0;
   6625 
   6626  fail:
   6627 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6628 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6629 			bus_dmamap_destroy(sc->sc_dmat,
   6630 			    rxq->rxq_soft[i].rxs_dmamap);
   6631 	}
   6632 	return error;
   6633 }
   6634 
   6635 static void
   6636 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6637 {
   6638 	int i;
   6639 
   6640 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6641 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6642 			bus_dmamap_destroy(sc->sc_dmat,
   6643 			    rxq->rxq_soft[i].rxs_dmamap);
   6644 	}
   6645 }
   6646 
   6647 /*
    6648  * wm_alloc_txrx_queues:
   6649  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6650  */
   6651 static int
   6652 wm_alloc_txrx_queues(struct wm_softc *sc)
   6653 {
   6654 	int i, error, tx_done, rx_done;
   6655 
   6656 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6657 	    KM_SLEEP);
   6658 	if (sc->sc_queue == NULL) {
    6659 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6660 		error = ENOMEM;
   6661 		goto fail_0;
   6662 	}
   6663 
   6664 	/*
   6665 	 * For transmission
   6666 	 */
   6667 	error = 0;
   6668 	tx_done = 0;
   6669 	for (i = 0; i < sc->sc_nqueues; i++) {
   6670 #ifdef WM_EVENT_COUNTERS
   6671 		int j;
   6672 		const char *xname;
   6673 #endif
   6674 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6675 		txq->txq_sc = sc;
   6676 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6677 
   6678 		error = wm_alloc_tx_descs(sc, txq);
   6679 		if (error)
   6680 			break;
   6681 		error = wm_alloc_tx_buffer(sc, txq);
   6682 		if (error) {
   6683 			wm_free_tx_descs(sc, txq);
   6684 			break;
   6685 		}
   6686 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6687 		if (txq->txq_interq == NULL) {
   6688 			wm_free_tx_descs(sc, txq);
   6689 			wm_free_tx_buffer(sc, txq);
   6690 			error = ENOMEM;
   6691 			break;
   6692 		}
   6693 
   6694 #ifdef WM_EVENT_COUNTERS
   6695 		xname = device_xname(sc->sc_dev);
   6696 
   6697 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6698 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6699 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6700 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6701 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6702 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6703 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6704 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6705 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6706 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6707 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6708 
   6709 		for (j = 0; j < WM_NTXSEGS; j++) {
   6710 			snprintf(txq->txq_txseg_evcnt_names[j],
   6711 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6712 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6713 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6714 		}
   6715 
   6716 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6717 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6718 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6719 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6720 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6721 #endif /* WM_EVENT_COUNTERS */
   6722 
   6723 		tx_done++;
   6724 	}
   6725 	if (error)
   6726 		goto fail_1;
   6727 
   6728 	/*
    6729 	 * For receive
   6730 	 */
   6731 	error = 0;
   6732 	rx_done = 0;
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 #ifdef WM_EVENT_COUNTERS
   6735 		const char *xname;
   6736 #endif
   6737 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6738 		rxq->rxq_sc = sc;
   6739 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6740 
   6741 		error = wm_alloc_rx_descs(sc, rxq);
   6742 		if (error)
   6743 			break;
   6744 
   6745 		error = wm_alloc_rx_buffer(sc, rxq);
   6746 		if (error) {
   6747 			wm_free_rx_descs(sc, rxq);
   6748 			break;
   6749 		}
   6750 
   6751 #ifdef WM_EVENT_COUNTERS
   6752 		xname = device_xname(sc->sc_dev);
   6753 
   6754 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6755 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6756 
   6757 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6758 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6759 #endif /* WM_EVENT_COUNTERS */
   6760 
   6761 		rx_done++;
   6762 	}
   6763 	if (error)
   6764 		goto fail_2;
   6765 
   6766 	return 0;
   6767 
   6768  fail_2:
   6769 	for (i = 0; i < rx_done; i++) {
   6770 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6771 		wm_free_rx_buffer(sc, rxq);
   6772 		wm_free_rx_descs(sc, rxq);
   6773 		if (rxq->rxq_lock)
   6774 			mutex_obj_free(rxq->rxq_lock);
   6775 	}
   6776  fail_1:
   6777 	for (i = 0; i < tx_done; i++) {
   6778 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6779 		pcq_destroy(txq->txq_interq);
   6780 		wm_free_tx_buffer(sc, txq);
   6781 		wm_free_tx_descs(sc, txq);
   6782 		if (txq->txq_lock)
   6783 			mutex_obj_free(txq->txq_lock);
   6784 	}
   6785 
   6786 	kmem_free(sc->sc_queue,
   6787 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6788  fail_0:
   6789 	return error;
   6790 }
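
/*
 * The allocation above follows the usual partial-unwind idiom: tx_done
 * and rx_done count how many queues were fully set up, so the fail_1
 * and fail_2 paths tear down only what was actually allocated.  A
 * minimal sketch of the idiom (alloc_one(), free_one(), res[] and n
 * are hypothetical):
 */
#if 0	/* illustrative sketch only, never compiled */
	int i, done = 0, error = 0;

	for (i = 0; i < n; i++) {
		if ((error = alloc_one(&res[i])) != 0)
			break;		/* res[i] was not allocated */
		done++;
	}
	if (error) {
		while (done-- > 0)
			free_one(&res[done]);	/* unwind what exists */
	}
#endif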
   6791 
   6792 /*
    6793  * wm_free_txrx_queues:
   6794  *	Free {tx,rx}descs and {tx,rx} buffers
   6795  */
   6796 static void
   6797 wm_free_txrx_queues(struct wm_softc *sc)
   6798 {
   6799 	int i;
   6800 
   6801 	for (i = 0; i < sc->sc_nqueues; i++) {
   6802 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6803 
   6804 #ifdef WM_EVENT_COUNTERS
   6805 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6806 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6807 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6808 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6809 #endif /* WM_EVENT_COUNTERS */
   6810 
   6811 		wm_free_rx_buffer(sc, rxq);
   6812 		wm_free_rx_descs(sc, rxq);
   6813 		if (rxq->rxq_lock)
   6814 			mutex_obj_free(rxq->rxq_lock);
   6815 	}
   6816 
   6817 	for (i = 0; i < sc->sc_nqueues; i++) {
   6818 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6819 		struct mbuf *m;
   6820 #ifdef WM_EVENT_COUNTERS
   6821 		int j;
   6822 
   6823 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6824 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6825 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6826 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6827 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6828 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6829 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6830 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6831 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6832 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6833 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6834 
   6835 		for (j = 0; j < WM_NTXSEGS; j++)
   6836 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6837 
   6838 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6839 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6840 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6841 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6842 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6843 #endif /* WM_EVENT_COUNTERS */
   6844 
   6845 		/* drain txq_interq */
   6846 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6847 			m_freem(m);
   6848 		pcq_destroy(txq->txq_interq);
   6849 
   6850 		wm_free_tx_buffer(sc, txq);
   6851 		wm_free_tx_descs(sc, txq);
   6852 		if (txq->txq_lock)
   6853 			mutex_obj_free(txq->txq_lock);
   6854 	}
   6855 
   6856 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6857 }
   6858 
   6859 static void
   6860 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6861 {
   6862 
   6863 	KASSERT(mutex_owned(txq->txq_lock));
   6864 
   6865 	/* Initialize the transmit descriptor ring. */
   6866 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6867 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6868 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6869 	txq->txq_free = WM_NTXDESC(txq);
   6870 	txq->txq_next = 0;
   6871 }
   6872 
   6873 static void
   6874 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6875     struct wm_txqueue *txq)
   6876 {
   6877 
   6878 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6879 		device_xname(sc->sc_dev), __func__));
   6880 	KASSERT(mutex_owned(txq->txq_lock));
   6881 
   6882 	if (sc->sc_type < WM_T_82543) {
   6883 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6884 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6885 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6886 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6887 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6888 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6889 	} else {
   6890 		int qid = wmq->wmq_id;
   6891 
   6892 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6893 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6894 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6895 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6896 
   6897 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6898 			/*
   6899 			 * Don't write TDT before TCTL.EN is set.
    6900 			 * See the datasheet.
   6901 			 */
   6902 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6903 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6904 			    | TXDCTL_WTHRESH(0));
   6905 		else {
   6906 			/* XXX should update with AIM? */
   6907 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6908 			if (sc->sc_type >= WM_T_82540) {
    6909 				/* TADV should be the same value as TIDV */
   6910 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6911 			}
   6912 
   6913 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6914 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6915 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6916 		}
   6917 	}
   6918 }
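
/*
 * Background note (a summary, not new behaviour; see the i8254x manuals
 * for the exact rules): TDH and TDT form a producer/consumer pair.
 * Software fills descriptors and advances the tail (TDT); hardware
 * transmits and advances the head (TDH).  The ring is empty when
 * TDH == TDT, and one slot is always left unused so that a full ring
 * can be told apart from an empty one.  For example, on a 256-entry
 * ring with TDH = 10 and TDT = 8, (10 - 8 - 1) mod 256 = 1 descriptor
 * is still usable by software.
 */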
   6919 
   6920 static void
   6921 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6922 {
   6923 	int i;
   6924 
   6925 	KASSERT(mutex_owned(txq->txq_lock));
   6926 
   6927 	/* Initialize the transmit job descriptors. */
   6928 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6929 		txq->txq_soft[i].txs_mbuf = NULL;
   6930 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6931 	txq->txq_snext = 0;
   6932 	txq->txq_sdirty = 0;
   6933 }
   6934 
   6935 static void
   6936 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6937     struct wm_txqueue *txq)
   6938 {
   6939 
   6940 	KASSERT(mutex_owned(txq->txq_lock));
   6941 
   6942 	/*
   6943 	 * Set up some register offsets that are different between
   6944 	 * the i82542 and the i82543 and later chips.
   6945 	 */
   6946 	if (sc->sc_type < WM_T_82543)
   6947 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6948 	else
   6949 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6950 
   6951 	wm_init_tx_descs(sc, txq);
   6952 	wm_init_tx_regs(sc, wmq, txq);
   6953 	wm_init_tx_buffer(sc, txq);
   6954 
   6955 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6956 	txq->txq_sending = false;
   6957 }
   6958 
   6959 static void
   6960 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6961     struct wm_rxqueue *rxq)
   6962 {
   6963 
   6964 	KASSERT(mutex_owned(rxq->rxq_lock));
   6965 
   6966 	/*
   6967 	 * Initialize the receive descriptor and receive job
   6968 	 * descriptor rings.
   6969 	 */
   6970 	if (sc->sc_type < WM_T_82543) {
   6971 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6972 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6973 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6974 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6975 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6976 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6977 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6978 
   6979 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6980 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6981 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6982 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6983 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6984 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6985 	} else {
   6986 		int qid = wmq->wmq_id;
   6987 
   6988 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6989 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6990 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6991 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6992 
   6993 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6994 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6995 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6996 
    6997 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6998 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6999 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7000 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7001 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7002 			    | RXDCTL_WTHRESH(1));
   7003 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7004 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7005 		} else {
   7006 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7007 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7008 			/* XXX should update with AIM? */
   7009 			CSR_WRITE(sc, WMREG_RDTR,
   7010 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7011 			/* RADV MUST be the same value as RDTR */
   7012 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7013 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7014 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7015 		}
   7016 	}
   7017 }
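
/*
 * Worked example for the SRRCTL setup above, assuming
 * SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. the field counts 1KB units): with
 * the usual MCLBYTES of 2048, the mask check passes because 2048 is a
 * multiple of 1024, and the field is programmed as 2048 >> 10 = 2,
 * selecting 2KB receive buffers on the 82575 and newer controllers.
 */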
   7018 
   7019 static int
   7020 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7021 {
   7022 	struct wm_rxsoft *rxs;
   7023 	int error, i;
   7024 
   7025 	KASSERT(mutex_owned(rxq->rxq_lock));
   7026 
   7027 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7028 		rxs = &rxq->rxq_soft[i];
   7029 		if (rxs->rxs_mbuf == NULL) {
   7030 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7031 				log(LOG_ERR, "%s: unable to allocate or map "
   7032 				    "rx buffer %d, error = %d\n",
   7033 				    device_xname(sc->sc_dev), i, error);
   7034 				/*
   7035 				 * XXX Should attempt to run with fewer receive
   7036 				 * XXX buffers instead of just failing.
   7037 				 */
   7038 				wm_rxdrain(rxq);
   7039 				return ENOMEM;
   7040 			}
   7041 		} else {
   7042 			/*
   7043 			 * For 82575 and 82576, the RX descriptors must be
   7044 			 * initialized after the setting of RCTL.EN in
   7045 			 * wm_set_filter()
   7046 			 */
   7047 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7048 				wm_init_rxdesc(rxq, i);
   7049 		}
   7050 	}
   7051 	rxq->rxq_ptr = 0;
   7052 	rxq->rxq_discard = 0;
   7053 	WM_RXCHAIN_RESET(rxq);
   7054 
   7055 	return 0;
   7056 }
   7057 
   7058 static int
   7059 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7060     struct wm_rxqueue *rxq)
   7061 {
   7062 
   7063 	KASSERT(mutex_owned(rxq->rxq_lock));
   7064 
   7065 	/*
   7066 	 * Set up some register offsets that are different between
   7067 	 * the i82542 and the i82543 and later chips.
   7068 	 */
   7069 	if (sc->sc_type < WM_T_82543)
   7070 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7071 	else
   7072 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7073 
   7074 	wm_init_rx_regs(sc, wmq, rxq);
   7075 	return wm_init_rx_buffer(sc, rxq);
   7076 }
   7077 
   7078 /*
    7079  * wm_init_txrx_queues:
   7080  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7081  */
   7082 static int
   7083 wm_init_txrx_queues(struct wm_softc *sc)
   7084 {
   7085 	int i, error = 0;
   7086 
   7087 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7088 		device_xname(sc->sc_dev), __func__));
   7089 
   7090 	for (i = 0; i < sc->sc_nqueues; i++) {
   7091 		struct wm_queue *wmq = &sc->sc_queue[i];
   7092 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7093 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7094 
   7095 		/*
   7096 		 * TODO
    7097 		 * Currently, a constant value is used instead of AIM.
    7098 		 * Furthermore, the interrupt interval of a multiqueue setup,
    7099 		 * which uses polling mode, is less than the default value.
    7100 		 * More tuning and AIM are required.
   7101 		 */
   7102 		if (wm_is_using_multiqueue(sc))
   7103 			wmq->wmq_itr = 50;
   7104 		else
   7105 			wmq->wmq_itr = sc->sc_itr_init;
   7106 		wmq->wmq_set_itr = true;
   7107 
   7108 		mutex_enter(txq->txq_lock);
   7109 		wm_init_tx_queue(sc, wmq, txq);
   7110 		mutex_exit(txq->txq_lock);
   7111 
   7112 		mutex_enter(rxq->rxq_lock);
   7113 		error = wm_init_rx_queue(sc, wmq, rxq);
   7114 		mutex_exit(rxq->rxq_lock);
   7115 		if (error)
   7116 			break;
   7117 	}
   7118 
   7119 	return error;
   7120 }
   7121 
   7122 /*
   7123  * wm_tx_offload:
   7124  *
   7125  *	Set up TCP/IP checksumming parameters for the
   7126  *	specified packet.
   7127  */
   7128 static int
   7129 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7130     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7131 {
   7132 	struct mbuf *m0 = txs->txs_mbuf;
   7133 	struct livengood_tcpip_ctxdesc *t;
   7134 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7135 	uint32_t ipcse;
   7136 	struct ether_header *eh;
   7137 	int offset, iphl;
   7138 	uint8_t fields;
   7139 
   7140 	/*
   7141 	 * XXX It would be nice if the mbuf pkthdr had offset
   7142 	 * fields for the protocol headers.
   7143 	 */
   7144 
   7145 	eh = mtod(m0, struct ether_header *);
   7146 	switch (htons(eh->ether_type)) {
   7147 	case ETHERTYPE_IP:
   7148 	case ETHERTYPE_IPV6:
   7149 		offset = ETHER_HDR_LEN;
   7150 		break;
   7151 
   7152 	case ETHERTYPE_VLAN:
   7153 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7154 		break;
   7155 
   7156 	default:
   7157 		/*
   7158 		 * Don't support this protocol or encapsulation.
   7159 		 */
   7160 		*fieldsp = 0;
   7161 		*cmdp = 0;
   7162 		return 0;
   7163 	}
   7164 
   7165 	if ((m0->m_pkthdr.csum_flags &
   7166 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7167 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7168 	} else
   7169 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7170 
   7171 	ipcse = offset + iphl - 1;
   7172 
   7173 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7174 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7175 	seg = 0;
   7176 	fields = 0;
   7177 
   7178 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7179 		int hlen = offset + iphl;
   7180 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7181 
   7182 		if (__predict_false(m0->m_len <
   7183 				    (hlen + sizeof(struct tcphdr)))) {
   7184 			/*
   7185 			 * TCP/IP headers are not in the first mbuf; we need
   7186 			 * to do this the slow and painful way. Let's just
   7187 			 * hope this doesn't happen very often.
   7188 			 */
   7189 			struct tcphdr th;
   7190 
   7191 			WM_Q_EVCNT_INCR(txq, tsopain);
   7192 
   7193 			m_copydata(m0, hlen, sizeof(th), &th);
   7194 			if (v4) {
   7195 				struct ip ip;
   7196 
   7197 				m_copydata(m0, offset, sizeof(ip), &ip);
   7198 				ip.ip_len = 0;
   7199 				m_copyback(m0,
   7200 				    offset + offsetof(struct ip, ip_len),
   7201 				    sizeof(ip.ip_len), &ip.ip_len);
   7202 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7203 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7204 			} else {
   7205 				struct ip6_hdr ip6;
   7206 
   7207 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7208 				ip6.ip6_plen = 0;
   7209 				m_copyback(m0,
   7210 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7211 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7212 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7213 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7214 			}
   7215 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7216 			    sizeof(th.th_sum), &th.th_sum);
   7217 
   7218 			hlen += th.th_off << 2;
   7219 		} else {
   7220 			/*
   7221 			 * TCP/IP headers are in the first mbuf; we can do
   7222 			 * this the easy way.
   7223 			 */
   7224 			struct tcphdr *th;
   7225 
   7226 			if (v4) {
   7227 				struct ip *ip =
   7228 				    (void *)(mtod(m0, char *) + offset);
   7229 				th = (void *)(mtod(m0, char *) + hlen);
   7230 
   7231 				ip->ip_len = 0;
   7232 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7233 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7234 			} else {
   7235 				struct ip6_hdr *ip6 =
   7236 				    (void *)(mtod(m0, char *) + offset);
   7237 				th = (void *)(mtod(m0, char *) + hlen);
   7238 
   7239 				ip6->ip6_plen = 0;
   7240 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7241 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7242 			}
   7243 			hlen += th->th_off << 2;
   7244 		}
   7245 
   7246 		if (v4) {
   7247 			WM_Q_EVCNT_INCR(txq, tso);
   7248 			cmdlen |= WTX_TCPIP_CMD_IP;
   7249 		} else {
   7250 			WM_Q_EVCNT_INCR(txq, tso6);
   7251 			ipcse = 0;
   7252 		}
   7253 		cmd |= WTX_TCPIP_CMD_TSE;
   7254 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7255 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7256 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7257 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7258 	}
   7259 
   7260 	/*
   7261 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7262 	 * offload feature, if we load the context descriptor, we
   7263 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7264 	 */
   7265 
   7266 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7267 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7268 	    WTX_TCPIP_IPCSE(ipcse);
   7269 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7270 		WM_Q_EVCNT_INCR(txq, ipsum);
   7271 		fields |= WTX_IXSM;
   7272 	}
   7273 
   7274 	offset += iphl;
   7275 
   7276 	if (m0->m_pkthdr.csum_flags &
   7277 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7278 		WM_Q_EVCNT_INCR(txq, tusum);
   7279 		fields |= WTX_TXSM;
   7280 		tucs = WTX_TCPIP_TUCSS(offset) |
   7281 		    WTX_TCPIP_TUCSO(offset +
   7282 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7283 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7284 	} else if ((m0->m_pkthdr.csum_flags &
   7285 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7286 		WM_Q_EVCNT_INCR(txq, tusum6);
   7287 		fields |= WTX_TXSM;
   7288 		tucs = WTX_TCPIP_TUCSS(offset) |
   7289 		    WTX_TCPIP_TUCSO(offset +
   7290 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7291 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7292 	} else {
   7293 		/* Just initialize it to a valid TCP context. */
   7294 		tucs = WTX_TCPIP_TUCSS(offset) |
   7295 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7296 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7297 	}
   7298 
   7299 	/*
    7300 	 * We don't have to write a context descriptor for every packet,
    7301 	 * except on the 82574. On the 82574, a context descriptor must be
    7302 	 * written for every packet when two descriptor queues are used.
    7303 	 * Writing a context descriptor for every packet adds overhead,
    7304 	 * but it does not cause problems.
   7305 	 */
   7306 	/* Fill in the context descriptor. */
   7307 	t = (struct livengood_tcpip_ctxdesc *)
   7308 	    &txq->txq_descs[txq->txq_next];
   7309 	t->tcpip_ipcs = htole32(ipcs);
   7310 	t->tcpip_tucs = htole32(tucs);
   7311 	t->tcpip_cmdlen = htole32(cmdlen);
   7312 	t->tcpip_seg = htole32(seg);
   7313 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7314 
   7315 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7316 	txs->txs_ndesc++;
   7317 
   7318 	*cmdp = cmd;
   7319 	*fieldsp = fields;
   7320 
   7321 	return 0;
   7322 }
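
/*
 * Worked example for the offsets computed above (illustrative): for an
 * untagged IPv4/TCP packet with a 20-byte IP header, offset starts at
 * ETHER_HDR_LEN (14), so:
 *
 *	ipcse = 14 + 20 - 1 = 33	last byte of the IP header
 *	IPCSO = 14 + offsetof(struct ip, ip_sum) = 24
 *	TUCSS = 14 + 20 = 34		after "offset += iphl"
 *	TUCSO = 34 + 16 = 50		csum_data holds
 *					offsetof(struct tcphdr, th_sum)
 *					= 16 for TCP
 */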
   7323 
   7324 static inline int
   7325 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7326 {
   7327 	struct wm_softc *sc = ifp->if_softc;
   7328 	u_int cpuid = cpu_index(curcpu());
   7329 
   7330 	/*
    7331 	 * Currently, a simple distribution strategy.
    7332 	 * TODO:
    7333 	 * distribute by flow ID (RSS hash value).
   7334 	 */
   7335 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7336 }
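
/*
 * Worked example (illustrative numbers): with ncpu = 8, sc_nqueues = 4
 * and sc_affinity_offset = 2, a thread on CPU 1 maps to
 * ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3.  Consecutive CPUs thus spread
 * across the Tx queues while the mapping stays stable per CPU.
 */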
   7337 
   7338 /*
   7339  * wm_start:		[ifnet interface function]
   7340  *
   7341  *	Start packet transmission on the interface.
   7342  */
   7343 static void
   7344 wm_start(struct ifnet *ifp)
   7345 {
   7346 	struct wm_softc *sc = ifp->if_softc;
   7347 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7348 
   7349 #ifdef WM_MPSAFE
   7350 	KASSERT(if_is_mpsafe(ifp));
   7351 #endif
   7352 	/*
   7353 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7354 	 */
   7355 
   7356 	mutex_enter(txq->txq_lock);
   7357 	if (!txq->txq_stopping)
   7358 		wm_start_locked(ifp);
   7359 	mutex_exit(txq->txq_lock);
   7360 }
   7361 
   7362 static void
   7363 wm_start_locked(struct ifnet *ifp)
   7364 {
   7365 	struct wm_softc *sc = ifp->if_softc;
   7366 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7367 
   7368 	wm_send_common_locked(ifp, txq, false);
   7369 }
   7370 
   7371 static int
   7372 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7373 {
   7374 	int qid;
   7375 	struct wm_softc *sc = ifp->if_softc;
   7376 	struct wm_txqueue *txq;
   7377 
   7378 	qid = wm_select_txqueue(ifp, m);
   7379 	txq = &sc->sc_queue[qid].wmq_txq;
   7380 
   7381 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7382 		m_freem(m);
   7383 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7384 		return ENOBUFS;
   7385 	}
   7386 
   7387 	/*
    7388 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   7389 	 */
   7390 	ifp->if_obytes += m->m_pkthdr.len;
   7391 	if (m->m_flags & M_MCAST)
   7392 		ifp->if_omcasts++;
   7393 
   7394 	if (mutex_tryenter(txq->txq_lock)) {
   7395 		if (!txq->txq_stopping)
   7396 			wm_transmit_locked(ifp, txq);
   7397 		mutex_exit(txq->txq_lock);
   7398 	}
   7399 
   7400 	return 0;
   7401 }
   7402 
   7403 static void
   7404 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7405 {
   7406 
   7407 	wm_send_common_locked(ifp, txq, true);
   7408 }
   7409 
   7410 static void
   7411 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7412     bool is_transmit)
   7413 {
   7414 	struct wm_softc *sc = ifp->if_softc;
   7415 	struct mbuf *m0;
   7416 	struct wm_txsoft *txs;
   7417 	bus_dmamap_t dmamap;
   7418 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7419 	bus_addr_t curaddr;
   7420 	bus_size_t seglen, curlen;
   7421 	uint32_t cksumcmd;
   7422 	uint8_t cksumfields;
   7423 	bool remap = true;
   7424 
   7425 	KASSERT(mutex_owned(txq->txq_lock));
   7426 
   7427 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7428 		return;
   7429 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7430 		return;
   7431 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7432 		return;
   7433 
   7434 	/* Remember the previous number of free descriptors. */
   7435 	ofree = txq->txq_free;
   7436 
   7437 	/*
   7438 	 * Loop through the send queue, setting up transmit descriptors
   7439 	 * until we drain the queue, or use up all available transmit
   7440 	 * descriptors.
   7441 	 */
   7442 	for (;;) {
   7443 		m0 = NULL;
   7444 
   7445 		/* Get a work queue entry. */
   7446 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7447 			wm_txeof(txq, UINT_MAX);
   7448 			if (txq->txq_sfree == 0) {
   7449 				DPRINTF(WM_DEBUG_TX,
   7450 				    ("%s: TX: no free job descriptors\n",
   7451 					device_xname(sc->sc_dev)));
   7452 				WM_Q_EVCNT_INCR(txq, txsstall);
   7453 				break;
   7454 			}
   7455 		}
   7456 
   7457 		/* Grab a packet off the queue. */
   7458 		if (is_transmit)
   7459 			m0 = pcq_get(txq->txq_interq);
   7460 		else
   7461 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7462 		if (m0 == NULL)
   7463 			break;
   7464 
   7465 		DPRINTF(WM_DEBUG_TX,
   7466 		    ("%s: TX: have packet to transmit: %p\n",
   7467 			device_xname(sc->sc_dev), m0));
   7468 
   7469 		txs = &txq->txq_soft[txq->txq_snext];
   7470 		dmamap = txs->txs_dmamap;
   7471 
   7472 		use_tso = (m0->m_pkthdr.csum_flags &
   7473 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7474 
   7475 		/*
   7476 		 * So says the Linux driver:
   7477 		 * The controller does a simple calculation to make sure
   7478 		 * there is enough room in the FIFO before initiating the
   7479 		 * DMA for each buffer. The calc is:
   7480 		 *	4 = ceil(buffer len / MSS)
   7481 		 * To make sure we don't overrun the FIFO, adjust the max
   7482 		 * buffer len if the MSS drops.
   7483 		 */
   7484 		dmamap->dm_maxsegsz =
   7485 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7486 		    ? m0->m_pkthdr.segsz << 2
   7487 		    : WTX_MAX_LEN;
   7488 
   7489 		/*
   7490 		 * Load the DMA map.  If this fails, the packet either
   7491 		 * didn't fit in the allotted number of segments, or we
   7492 		 * were short on resources.  For the too-many-segments
   7493 		 * case, we simply report an error and drop the packet,
   7494 		 * since we can't sanely copy a jumbo packet to a single
   7495 		 * buffer.
   7496 		 */
   7497 retry:
   7498 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7499 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7500 		if (__predict_false(error)) {
   7501 			if (error == EFBIG) {
   7502 				if (remap == true) {
   7503 					struct mbuf *m;
   7504 
   7505 					remap = false;
   7506 					m = m_defrag(m0, M_NOWAIT);
   7507 					if (m != NULL) {
   7508 						WM_Q_EVCNT_INCR(txq, defrag);
   7509 						m0 = m;
   7510 						goto retry;
   7511 					}
   7512 				}
   7513 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7514 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7515 				    "DMA segments, dropping...\n",
   7516 				    device_xname(sc->sc_dev));
   7517 				wm_dump_mbuf_chain(sc, m0);
   7518 				m_freem(m0);
   7519 				continue;
   7520 			}
   7521 			/*  Short on resources, just stop for now. */
   7522 			DPRINTF(WM_DEBUG_TX,
   7523 			    ("%s: TX: dmamap load failed: %d\n",
   7524 				device_xname(sc->sc_dev), error));
   7525 			break;
   7526 		}
   7527 
   7528 		segs_needed = dmamap->dm_nsegs;
   7529 		if (use_tso) {
   7530 			/* For sentinel descriptor; see below. */
   7531 			segs_needed++;
   7532 		}
   7533 
   7534 		/*
   7535 		 * Ensure we have enough descriptors free to describe
   7536 		 * the packet. Note, we always reserve one descriptor
   7537 		 * at the end of the ring due to the semantics of the
   7538 		 * TDT register, plus one more in the event we need
   7539 		 * to load offload context.
   7540 		 */
   7541 		if (segs_needed > txq->txq_free - 2) {
   7542 			/*
   7543 			 * Not enough free descriptors to transmit this
   7544 			 * packet.  We haven't committed anything yet,
   7545 			 * so just unload the DMA map, put the packet
   7546 			 * pack on the queue, and punt. Notify the upper
    7547 			 * back on the queue, and punt. Notify the upper
   7548 			 */
   7549 			DPRINTF(WM_DEBUG_TX,
   7550 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7551 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7552 				segs_needed, txq->txq_free - 1));
   7553 			if (!is_transmit)
   7554 				ifp->if_flags |= IFF_OACTIVE;
   7555 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7556 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7557 			WM_Q_EVCNT_INCR(txq, txdstall);
   7558 			break;
   7559 		}
   7560 
   7561 		/*
   7562 		 * Check for 82547 Tx FIFO bug. We need to do this
   7563 		 * once we know we can transmit the packet, since we
   7564 		 * do some internal FIFO space accounting here.
   7565 		 */
   7566 		if (sc->sc_type == WM_T_82547 &&
   7567 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7568 			DPRINTF(WM_DEBUG_TX,
   7569 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7570 				device_xname(sc->sc_dev)));
   7571 			if (!is_transmit)
   7572 				ifp->if_flags |= IFF_OACTIVE;
   7573 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7574 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7575 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7576 			break;
   7577 		}
   7578 
   7579 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7580 
   7581 		DPRINTF(WM_DEBUG_TX,
   7582 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7583 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7584 
   7585 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7586 
   7587 		/*
   7588 		 * Store a pointer to the packet so that we can free it
   7589 		 * later.
   7590 		 *
   7591 		 * Initially, we consider the number of descriptors the
   7592 		 * packet uses the number of DMA segments.  This may be
   7593 		 * incremented by 1 if we do checksum offload (a descriptor
   7594 		 * is used to set the checksum context).
   7595 		 */
   7596 		txs->txs_mbuf = m0;
   7597 		txs->txs_firstdesc = txq->txq_next;
   7598 		txs->txs_ndesc = segs_needed;
   7599 
   7600 		/* Set up offload parameters for this packet. */
   7601 		if (m0->m_pkthdr.csum_flags &
   7602 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7603 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7604 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7605 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7606 					  &cksumfields) != 0) {
   7607 				/* Error message already displayed. */
   7608 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7609 				continue;
   7610 			}
   7611 		} else {
   7612 			cksumcmd = 0;
   7613 			cksumfields = 0;
   7614 		}
   7615 
   7616 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7617 
   7618 		/* Sync the DMA map. */
   7619 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7620 		    BUS_DMASYNC_PREWRITE);
   7621 
   7622 		/* Initialize the transmit descriptor. */
   7623 		for (nexttx = txq->txq_next, seg = 0;
   7624 		     seg < dmamap->dm_nsegs; seg++) {
   7625 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7626 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7627 			     seglen != 0;
   7628 			     curaddr += curlen, seglen -= curlen,
   7629 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7630 				curlen = seglen;
   7631 
   7632 				/*
   7633 				 * So says the Linux driver:
   7634 				 * Work around for premature descriptor
   7635 				 * write-backs in TSO mode.  Append a
   7636 				 * 4-byte sentinel descriptor.
   7637 				 */
   7638 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7639 				    curlen > 8)
   7640 					curlen -= 4;
   7641 
   7642 				wm_set_dma_addr(
   7643 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7644 				txq->txq_descs[nexttx].wtx_cmdlen
   7645 				    = htole32(cksumcmd | curlen);
   7646 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7647 				    = 0;
   7648 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7649 				    = cksumfields;
   7650 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7651 				lasttx = nexttx;
   7652 
   7653 				DPRINTF(WM_DEBUG_TX,
   7654 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7655 					"len %#04zx\n",
   7656 					device_xname(sc->sc_dev), nexttx,
   7657 					(uint64_t)curaddr, curlen));
   7658 			}
   7659 		}
   7660 
   7661 		KASSERT(lasttx != -1);
   7662 
   7663 		/*
   7664 		 * Set up the command byte on the last descriptor of
   7665 		 * the packet. If we're in the interrupt delay window,
   7666 		 * delay the interrupt.
   7667 		 */
   7668 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7669 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7670 
   7671 		/*
   7672 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7673 		 * up the descriptor to encapsulate the packet for us.
   7674 		 *
   7675 		 * This is only valid on the last descriptor of the packet.
   7676 		 */
   7677 		if (vlan_has_tag(m0)) {
   7678 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7679 			    htole32(WTX_CMD_VLE);
   7680 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7681 			    = htole16(vlan_get_tag(m0));
   7682 		}
   7683 
   7684 		txs->txs_lastdesc = lasttx;
   7685 
   7686 		DPRINTF(WM_DEBUG_TX,
   7687 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7688 			device_xname(sc->sc_dev),
   7689 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7690 
   7691 		/* Sync the descriptors we're using. */
   7692 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7693 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7694 
   7695 		/* Give the packet to the chip. */
   7696 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7697 
   7698 		DPRINTF(WM_DEBUG_TX,
   7699 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7700 
   7701 		DPRINTF(WM_DEBUG_TX,
   7702 		    ("%s: TX: finished transmitting packet, job %d\n",
   7703 			device_xname(sc->sc_dev), txq->txq_snext));
   7704 
   7705 		/* Advance the tx pointer. */
   7706 		txq->txq_free -= txs->txs_ndesc;
   7707 		txq->txq_next = nexttx;
   7708 
   7709 		txq->txq_sfree--;
   7710 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7711 
   7712 		/* Pass the packet to any BPF listeners. */
   7713 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7714 	}
   7715 
   7716 	if (m0 != NULL) {
   7717 		if (!is_transmit)
   7718 			ifp->if_flags |= IFF_OACTIVE;
   7719 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7720 		WM_Q_EVCNT_INCR(txq, descdrop);
   7721 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7722 			__func__));
   7723 		m_freem(m0);
   7724 	}
   7725 
   7726 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7727 		/* No more slots; notify upper layer. */
   7728 		if (!is_transmit)
   7729 			ifp->if_flags |= IFF_OACTIVE;
   7730 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7731 	}
   7732 
   7733 	if (txq->txq_free != ofree) {
   7734 		/* Set a watchdog timer in case the chip flakes out. */
   7735 		txq->txq_lastsent = time_uptime;
   7736 		txq->txq_sending = true;
   7737 	}
   7738 }
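
/*
 * Descriptor accounting summary for the loop above (no new behaviour):
 * a packet needs dmamap->dm_nsegs descriptors, plus one more for the
 * TSO sentinel, and the "txq_free - 2" test reserves one slot for the
 * TDT semantics and one for a possible offload context descriptor.
 * For example, a TSO packet mapped into 6 segments needs 6 + 1 = 7
 * descriptors and is only committed while txq_free >= 9.
 */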
   7739 
   7740 /*
   7741  * wm_nq_tx_offload:
   7742  *
   7743  *	Set up TCP/IP checksumming parameters for the
   7744  *	specified packet, for NEWQUEUE devices
   7745  */
   7746 static int
   7747 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7748     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7749 {
   7750 	struct mbuf *m0 = txs->txs_mbuf;
   7751 	uint32_t vl_len, mssidx, cmdc;
   7752 	struct ether_header *eh;
   7753 	int offset, iphl;
   7754 
   7755 	/*
   7756 	 * XXX It would be nice if the mbuf pkthdr had offset
   7757 	 * fields for the protocol headers.
   7758 	 */
   7759 	*cmdlenp = 0;
   7760 	*fieldsp = 0;
   7761 
   7762 	eh = mtod(m0, struct ether_header *);
   7763 	switch (htons(eh->ether_type)) {
   7764 	case ETHERTYPE_IP:
   7765 	case ETHERTYPE_IPV6:
   7766 		offset = ETHER_HDR_LEN;
   7767 		break;
   7768 
   7769 	case ETHERTYPE_VLAN:
   7770 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7771 		break;
   7772 
   7773 	default:
   7774 		/* Don't support this protocol or encapsulation. */
   7775 		*do_csum = false;
   7776 		return 0;
   7777 	}
   7778 	*do_csum = true;
   7779 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7780 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7781 
   7782 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7783 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7784 
   7785 	if ((m0->m_pkthdr.csum_flags &
   7786 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7787 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7788 	} else {
   7789 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7790 	}
   7791 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7792 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7793 
   7794 	if (vlan_has_tag(m0)) {
   7795 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7796 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7797 		*cmdlenp |= NQTX_CMD_VLE;
   7798 	}
   7799 
   7800 	mssidx = 0;
   7801 
   7802 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7803 		int hlen = offset + iphl;
   7804 		int tcp_hlen;
   7805 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7806 
   7807 		if (__predict_false(m0->m_len <
   7808 				    (hlen + sizeof(struct tcphdr)))) {
   7809 			/*
   7810 			 * TCP/IP headers are not in the first mbuf; we need
   7811 			 * to do this the slow and painful way. Let's just
   7812 			 * hope this doesn't happen very often.
   7813 			 */
   7814 			struct tcphdr th;
   7815 
   7816 			WM_Q_EVCNT_INCR(txq, tsopain);
   7817 
   7818 			m_copydata(m0, hlen, sizeof(th), &th);
   7819 			if (v4) {
   7820 				struct ip ip;
   7821 
   7822 				m_copydata(m0, offset, sizeof(ip), &ip);
   7823 				ip.ip_len = 0;
   7824 				m_copyback(m0,
   7825 				    offset + offsetof(struct ip, ip_len),
   7826 				    sizeof(ip.ip_len), &ip.ip_len);
   7827 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7828 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7829 			} else {
   7830 				struct ip6_hdr ip6;
   7831 
   7832 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7833 				ip6.ip6_plen = 0;
   7834 				m_copyback(m0,
   7835 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7836 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7837 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7838 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7839 			}
   7840 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7841 			    sizeof(th.th_sum), &th.th_sum);
   7842 
   7843 			tcp_hlen = th.th_off << 2;
   7844 		} else {
   7845 			/*
   7846 			 * TCP/IP headers are in the first mbuf; we can do
   7847 			 * this the easy way.
   7848 			 */
   7849 			struct tcphdr *th;
   7850 
   7851 			if (v4) {
   7852 				struct ip *ip =
   7853 				    (void *)(mtod(m0, char *) + offset);
   7854 				th = (void *)(mtod(m0, char *) + hlen);
   7855 
   7856 				ip->ip_len = 0;
   7857 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7858 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7859 			} else {
   7860 				struct ip6_hdr *ip6 =
   7861 				    (void *)(mtod(m0, char *) + offset);
   7862 				th = (void *)(mtod(m0, char *) + hlen);
   7863 
   7864 				ip6->ip6_plen = 0;
   7865 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7866 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7867 			}
   7868 			tcp_hlen = th->th_off << 2;
   7869 		}
   7870 		hlen += tcp_hlen;
   7871 		*cmdlenp |= NQTX_CMD_TSE;
   7872 
   7873 		if (v4) {
   7874 			WM_Q_EVCNT_INCR(txq, tso);
   7875 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7876 		} else {
   7877 			WM_Q_EVCNT_INCR(txq, tso6);
   7878 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7879 		}
   7880 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7881 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7882 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7883 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7884 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7885 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7886 	} else {
   7887 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7888 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7889 	}
   7890 
   7891 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7892 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7893 		cmdc |= NQTXC_CMD_IP4;
   7894 	}
   7895 
   7896 	if (m0->m_pkthdr.csum_flags &
   7897 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7898 		WM_Q_EVCNT_INCR(txq, tusum);
   7899 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7900 			cmdc |= NQTXC_CMD_TCP;
   7901 		else
   7902 			cmdc |= NQTXC_CMD_UDP;
   7903 
   7904 		cmdc |= NQTXC_CMD_IP4;
   7905 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7906 	}
   7907 	if (m0->m_pkthdr.csum_flags &
   7908 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7909 		WM_Q_EVCNT_INCR(txq, tusum6);
   7910 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7911 			cmdc |= NQTXC_CMD_TCP;
   7912 		else
   7913 			cmdc |= NQTXC_CMD_UDP;
   7914 
   7915 		cmdc |= NQTXC_CMD_IP6;
   7916 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7917 	}
   7918 
   7919 	/*
   7920 	 * We don't have to write a context descriptor for every packet on
   7921 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
   7922 	 * I210 and I211; for those it is enough to write one per Tx queue.
   7923 	 * Writing a context descriptor for every packet adds overhead,
   7924 	 * but it does not cause problems. (A cached-context sketch
   7925 	 * follows this function.)
   7926 	 */
   7927 	/* Fill in the context descriptor. */
   7928 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7929 	    htole32(vl_len);
   7930 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7931 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7932 	    htole32(cmdc);
   7933 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7934 	    htole32(mssidx);
   7935 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7936 	DPRINTF(WM_DEBUG_TX,
   7937 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7938 		txq->txq_next, 0, vl_len));
   7939 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7940 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7941 	txs->txs_ndesc++;
   7942 	return 0;
   7943 }
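/*
 * A sketch of how the per-queue context write mentioned above could be
 * elided: cache what was last programmed and skip the write when nothing
 * changed.  The txq_last_* fields are hypothetical (this driver does not
 * define them), so the disabled block below is illustration only, not
 * the driver's method.
 */
#if 0
static void
wm_nq_maybe_write_ctx(struct wm_txqueue *txq, struct wm_txsoft *txs,
    uint32_t vl_len, uint32_t cmdc, uint32_t mssidx)
{

	if (vl_len == txq->txq_last_vl_len && cmdc == txq->txq_last_cmdc &&
	    mssidx == txq->txq_last_mssidx)
		return;		/* The queue's context is already correct. */

	/* Same stores and sync as in wm_nq_tx_offload() above. */
	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
	    htole32(vl_len);
	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = htole32(cmdc);
	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
	    htole32(mssidx);
	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
	txs->txs_ndesc++;

	/* Remember what the hardware context now holds. */
	txq->txq_last_vl_len = vl_len;
	txq->txq_last_cmdc = cmdc;
	txq->txq_last_mssidx = mssidx;
}
#endif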
   7944 
   7945 /*
   7946  * wm_nq_start:		[ifnet interface function]
   7947  *
   7948  *	Start packet transmission on the interface for NEWQUEUE devices
   7949  */
   7950 static void
   7951 wm_nq_start(struct ifnet *ifp)
   7952 {
   7953 	struct wm_softc *sc = ifp->if_softc;
   7954 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7955 
   7956 #ifdef WM_MPSAFE
   7957 	KASSERT(if_is_mpsafe(ifp));
   7958 #endif
   7959 	/*
   7960 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7961 	 */
   7962 
   7963 	mutex_enter(txq->txq_lock);
   7964 	if (!txq->txq_stopping)
   7965 		wm_nq_start_locked(ifp);
   7966 	mutex_exit(txq->txq_lock);
   7967 }
   7968 
   7969 static void
   7970 wm_nq_start_locked(struct ifnet *ifp)
   7971 {
   7972 	struct wm_softc *sc = ifp->if_softc;
   7973 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7974 
   7975 	wm_nq_send_common_locked(ifp, txq, false);
   7976 }
   7977 
   7978 static int
   7979 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7980 {
   7981 	int qid;
   7982 	struct wm_softc *sc = ifp->if_softc;
   7983 	struct wm_txqueue *txq;
   7984 
   7985 	qid = wm_select_txqueue(ifp, m);
   7986 	txq = &sc->sc_queue[qid].wmq_txq;
   7987 
   7988 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7989 		m_freem(m);
   7990 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7991 		return ENOBUFS;
   7992 	}
   7993 
   7994 	/*
   7995 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7996 	 */
   7997 	ifp->if_obytes += m->m_pkthdr.len;
   7998 	if (m->m_flags & M_MCAST)
   7999 		ifp->if_omcasts++;
   8000 
   8001 	/*
   8002 	 * There are two situations in which this mutex_tryenter() can
   8003 	 * fail at run time:
   8004 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   8005 	 *     (2) contention with the deferred if_start softint
   8006 	 *         (wm_handle_queue())
   8007 	 * In case (1), the last packet enqueued to txq->txq_interq is
   8008 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8009 	 * The same holds in case (2): the last enqueued packet is also
   8010 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8011 	 */
   8012 	if (mutex_tryenter(txq->txq_lock)) {
   8013 		if (!txq->txq_stopping)
   8014 			wm_nq_transmit_locked(ifp, txq);
   8015 		mutex_exit(txq->txq_lock);
   8016 	}
   8017 
   8018 	return 0;
   8019 }
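/*
 * The enqueue-then-tryenter pattern above, in outline (a sketch of the
 * idea, not additional driver code):
 *
 *	pcq_put(txq->txq_interq, m);		(A) publish the packet
 *	if (mutex_tryenter(txq->txq_lock)) {	(B) drain if uncontended
 *		wm_nq_transmit_locked(ifp, txq);
 *		mutex_exit(txq->txq_lock);
 *	}
 *
 * If (B) fails, whoever holds txq_lock is in a code path that ends in
 * wm_deferred_start_locked(), which drains txq_interq, so the packet
 * published at (A) is still sent.
 */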
   8020 
   8021 static void
   8022 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8023 {
   8024 
   8025 	wm_nq_send_common_locked(ifp, txq, true);
   8026 }
   8027 
   8028 static void
   8029 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8030     bool is_transmit)
   8031 {
   8032 	struct wm_softc *sc = ifp->if_softc;
   8033 	struct mbuf *m0;
   8034 	struct wm_txsoft *txs;
   8035 	bus_dmamap_t dmamap;
   8036 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8037 	bool do_csum, sent;
   8038 	bool remap = true;
   8039 
   8040 	KASSERT(mutex_owned(txq->txq_lock));
   8041 
   8042 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8043 		return;
   8044 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8045 		return;
   8046 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8047 		return;
   8048 
   8049 	sent = false;
   8050 
   8051 	/*
   8052 	 * Loop through the send queue, setting up transmit descriptors
   8053 	 * until we drain the queue, or use up all available transmit
   8054 	 * descriptors.
   8055 	 */
   8056 	for (;;) {
   8057 		m0 = NULL;
   8058 
   8059 		/* Get a work queue entry. */
   8060 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8061 			wm_txeof(txq, UINT_MAX);
   8062 			if (txq->txq_sfree == 0) {
   8063 				DPRINTF(WM_DEBUG_TX,
   8064 				    ("%s: TX: no free job descriptors\n",
   8065 					device_xname(sc->sc_dev)));
   8066 				WM_Q_EVCNT_INCR(txq, txsstall);
   8067 				break;
   8068 			}
   8069 		}
   8070 
   8071 		/* Grab a packet off the queue. */
   8072 		if (is_transmit)
   8073 			m0 = pcq_get(txq->txq_interq);
   8074 		else
   8075 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8076 		if (m0 == NULL)
   8077 			break;
   8078 
   8079 		DPRINTF(WM_DEBUG_TX,
   8080 		    ("%s: TX: have packet to transmit: %p\n",
   8081 		    device_xname(sc->sc_dev), m0));
   8082 
   8083 		txs = &txq->txq_soft[txq->txq_snext];
   8084 		dmamap = txs->txs_dmamap;
   8085 
   8086 		/*
   8087 		 * Load the DMA map.  If this fails, the packet either
   8088 		 * didn't fit in the allotted number of segments, or we
   8089 		 * were short on resources.  For the too-many-segments
   8090 		 * case, we simply report an error and drop the packet,
   8091 		 * since we can't sanely copy a jumbo packet to a single
   8092 		 * buffer.
   8093 		 */
   8094 retry:
   8095 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8096 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8097 		if (__predict_false(error)) {
   8098 			if (error == EFBIG) {
   8099 				if (remap == true) {
   8100 					struct mbuf *m;
   8101 
   8102 					remap = false;
   8103 					m = m_defrag(m0, M_NOWAIT);
   8104 					if (m != NULL) {
   8105 						WM_Q_EVCNT_INCR(txq, defrag);
   8106 						m0 = m;
   8107 						goto retry;
   8108 					}
   8109 				}
   8110 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8111 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8112 				    "DMA segments, dropping...\n",
   8113 				    device_xname(sc->sc_dev));
   8114 				wm_dump_mbuf_chain(sc, m0);
   8115 				m_freem(m0);
   8116 				continue;
   8117 			}
   8118 			/* Short on resources, just stop for now. */
   8119 			DPRINTF(WM_DEBUG_TX,
   8120 			    ("%s: TX: dmamap load failed: %d\n",
   8121 				device_xname(sc->sc_dev), error));
   8122 			break;
   8123 		}
   8124 
   8125 		segs_needed = dmamap->dm_nsegs;
   8126 
   8127 		/*
   8128 		 * Ensure we have enough descriptors free to describe
   8129 		 * the packet. Note, we always reserve one descriptor
   8130 		 * at the end of the ring due to the semantics of the
   8131 		 * TDT register, plus one more in the event we need
   8132 		 * to load offload context.
   8133 		 */
   8134 		if (segs_needed > txq->txq_free - 2) {
   8135 			/*
   8136 			 * Not enough free descriptors to transmit this
   8137 			 * packet.  We haven't committed anything yet,
   8138 			 * so just unload the DMA map, put the packet
   8139 			 * pack on the queue, and punt. Notify the upper
   8140 			 * layer that there are no more slots left.
   8141 			 */
   8142 			DPRINTF(WM_DEBUG_TX,
   8143 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8144 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8145 				segs_needed, txq->txq_free - 1));
   8146 			if (!is_transmit)
   8147 				ifp->if_flags |= IFF_OACTIVE;
   8148 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8149 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8150 			WM_Q_EVCNT_INCR(txq, txdstall);
   8151 			break;
   8152 		}
   8153 
   8154 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8155 
   8156 		DPRINTF(WM_DEBUG_TX,
   8157 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8158 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8159 
   8160 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8161 
   8162 		/*
   8163 		 * Store a pointer to the packet so that we can free it
   8164 		 * later.
   8165 		 *
   8166 		 * Initially, we consider the number of descriptors the
   8167 		 * packet uses to be the number of DMA segments.  This may be
   8168 		 * incremented by 1 if we do checksum offload (a descriptor
   8169 		 * is used to set the checksum context).
   8170 		 */
   8171 		txs->txs_mbuf = m0;
   8172 		txs->txs_firstdesc = txq->txq_next;
   8173 		txs->txs_ndesc = segs_needed;
   8174 
   8175 		/* Set up offload parameters for this packet. */
   8176 		uint32_t cmdlen, fields, dcmdlen;
   8177 		if (m0->m_pkthdr.csum_flags &
   8178 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8179 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8180 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8181 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8182 			    &do_csum) != 0) {
   8183 				/* Error message already displayed. */
   8184 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8185 				continue;
   8186 			}
   8187 		} else {
   8188 			do_csum = false;
   8189 			cmdlen = 0;
   8190 			fields = 0;
   8191 		}
   8192 
   8193 		/* Sync the DMA map. */
   8194 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8195 		    BUS_DMASYNC_PREWRITE);
   8196 
   8197 		/* Initialize the first transmit descriptor. */
   8198 		nexttx = txq->txq_next;
   8199 		if (!do_csum) {
   8200 			/* Set up a legacy descriptor. */
   8201 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8202 			    dmamap->dm_segs[0].ds_addr);
   8203 			txq->txq_descs[nexttx].wtx_cmdlen =
   8204 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8205 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8206 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8207 			if (vlan_has_tag(m0)) {
   8208 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8209 				    htole32(WTX_CMD_VLE);
   8210 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8211 				    htole16(vlan_get_tag(m0));
   8212 			} else
   8213 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8214 
   8215 			dcmdlen = 0;
   8216 		} else {
   8217 			/* Set up an advanced data descriptor. */
   8218 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8219 			    htole64(dmamap->dm_segs[0].ds_addr);
   8220 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8221 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8222 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8223 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8224 			    htole32(fields);
   8225 			DPRINTF(WM_DEBUG_TX,
   8226 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8227 				device_xname(sc->sc_dev), nexttx,
   8228 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8229 			DPRINTF(WM_DEBUG_TX,
   8230 			    ("\t 0x%08x%08x\n", fields,
   8231 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8232 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8233 		}
   8234 
   8235 		lasttx = nexttx;
   8236 		nexttx = WM_NEXTTX(txq, nexttx);
   8237 		/*
   8238 		 * Fill in the next descriptors. The legacy and advanced
   8239 		 * formats are the same here.
   8240 		 */
   8241 		for (seg = 1; seg < dmamap->dm_nsegs;
   8242 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8243 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8244 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8245 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8246 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8247 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8248 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8249 			lasttx = nexttx;
   8250 
   8251 			DPRINTF(WM_DEBUG_TX,
   8252 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8253 				device_xname(sc->sc_dev), nexttx,
   8254 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8255 				dmamap->dm_segs[seg].ds_len));
   8256 		}
   8257 
   8258 		KASSERT(lasttx != -1);
   8259 
   8260 		/*
   8261 		 * Set up the command byte on the last descriptor of
   8262 		 * the packet. If we're in the interrupt delay window,
   8263 		 * delay the interrupt.
   8264 		 */
   8265 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8266 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8267 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8268 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8269 
   8270 		txs->txs_lastdesc = lasttx;
   8271 
   8272 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8273 		    device_xname(sc->sc_dev),
   8274 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8275 
   8276 		/* Sync the descriptors we're using. */
   8277 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8278 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8279 
   8280 		/* Give the packet to the chip. */
   8281 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8282 		sent = true;
   8283 
   8284 		DPRINTF(WM_DEBUG_TX,
   8285 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8286 
   8287 		DPRINTF(WM_DEBUG_TX,
   8288 		    ("%s: TX: finished transmitting packet, job %d\n",
   8289 			device_xname(sc->sc_dev), txq->txq_snext));
   8290 
   8291 		/* Advance the tx pointer. */
   8292 		txq->txq_free -= txs->txs_ndesc;
   8293 		txq->txq_next = nexttx;
   8294 
   8295 		txq->txq_sfree--;
   8296 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8297 
   8298 		/* Pass the packet to any BPF listeners. */
   8299 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8300 	}
   8301 
   8302 	if (m0 != NULL) {
   8303 		if (!is_transmit)
   8304 			ifp->if_flags |= IFF_OACTIVE;
   8305 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8306 		WM_Q_EVCNT_INCR(txq, descdrop);
   8307 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8308 			__func__));
   8309 		m_freem(m0);
   8310 	}
   8311 
   8312 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8313 		/* No more slots; notify upper layer. */
   8314 		if (!is_transmit)
   8315 			ifp->if_flags |= IFF_OACTIVE;
   8316 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8317 	}
   8318 
   8319 	if (sent) {
   8320 		/* Set a watchdog timer in case the chip flakes out. */
   8321 		txq->txq_lastsent = time_uptime;
   8322 		txq->txq_sending = true;
   8323 	}
   8324 }
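/*
 * Descriptor accounting in the loop above, summarized: a packet needs
 * dm_nsegs data descriptors, plus possibly one context descriptor
 * (charged via txs_ndesc++ in wm_nq_tx_offload()), and one ring slot
 * must always stay unused because, per the TDT semantics noted above,
 * head == tail reads back as an empty ring.  Hence the check
 * "segs_needed > txq->txq_free - 2": with txq_free == 10, for example,
 * at most 8 data segments fit when a context descriptor may be needed.
 */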
   8325 
   8326 static void
   8327 wm_deferred_start_locked(struct wm_txqueue *txq)
   8328 {
   8329 	struct wm_softc *sc = txq->txq_sc;
   8330 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8331 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8332 	int qid = wmq->wmq_id;
   8333 
   8334 	KASSERT(mutex_owned(txq->txq_lock));
   8335 
   8336 	if (txq->txq_stopping) {
   8337 		mutex_exit(txq->txq_lock);
   8338 		return;
   8339 	}
   8340 
   8341 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8342 		/* XXX Needed for ALTQ or single-CPU systems */
   8343 		if (qid == 0)
   8344 			wm_nq_start_locked(ifp);
   8345 		wm_nq_transmit_locked(ifp, txq);
   8346 	} else {
   8347 		/* XXX Needed for ALTQ or single-CPU systems */
   8348 		if (qid == 0)
   8349 			wm_start_locked(ifp);
   8350 		wm_transmit_locked(ifp, txq);
   8351 	}
   8352 }
   8353 
   8354 /* Interrupt */
   8355 
   8356 /*
   8357  * wm_txeof:
   8358  *
   8359  *	Helper; handle transmit interrupts.
   8360  */
   8361 static bool
   8362 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8363 {
   8364 	struct wm_softc *sc = txq->txq_sc;
   8365 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8366 	struct wm_txsoft *txs;
   8367 	int count = 0;
   8368 	int i;
   8369 	uint8_t status;
   8370 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8371 	bool more = false;
   8372 
   8373 	KASSERT(mutex_owned(txq->txq_lock));
   8374 
   8375 	if (txq->txq_stopping)
   8376 		return false;
   8377 
   8378 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8379 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8380 	if (wmq->wmq_id == 0)
   8381 		ifp->if_flags &= ~IFF_OACTIVE;
   8382 
   8383 	/*
   8384 	 * Go through the Tx list and free mbufs for those
   8385 	 * frames which have been transmitted.
   8386 	 */
   8387 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8388 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8389 		if (limit-- == 0) {
   8390 			more = true;
   8391 			DPRINTF(WM_DEBUG_TX,
   8392 			    ("%s: TX: loop limited, job %d is not processed\n",
   8393 				device_xname(sc->sc_dev), i));
   8394 			break;
   8395 		}
   8396 
   8397 		txs = &txq->txq_soft[i];
   8398 
   8399 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8400 			device_xname(sc->sc_dev), i));
   8401 
   8402 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8403 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8404 
   8405 		status =
   8406 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8407 		if ((status & WTX_ST_DD) == 0) {
   8408 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8409 			    BUS_DMASYNC_PREREAD);
   8410 			break;
   8411 		}
   8412 
   8413 		count++;
   8414 		DPRINTF(WM_DEBUG_TX,
   8415 		    ("%s: TX: job %d done: descs %d..%d\n",
   8416 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8417 		    txs->txs_lastdesc));
   8418 
   8419 		/*
   8420 		 * XXX We should probably be using the statistics
   8421 		 * XXX registers, but I don't know if they exist
   8422 		 * XXX on chips before the i82544.
   8423 		 */
   8424 
   8425 #ifdef WM_EVENT_COUNTERS
   8426 		if (status & WTX_ST_TU)
   8427 			WM_Q_EVCNT_INCR(txq, underrun);
   8428 #endif /* WM_EVENT_COUNTERS */
   8429 
   8430 		/*
   8431 		 * Documents for the 82574 and newer say the status field has
   8432 		 * neither the EC (Excessive Collision) bit nor the LC (Late
   8433 		 * Collision) bit (both are reserved); refer to the "PCIe GbE
   8434 		 * Controller Open Source Software Developer's Manual" and the
   8435 		 * datasheets for the 82574 and newer.
   8436 		 *
   8437 		 * XXX The LC bit was seen set on an I218 with full-duplex
   8438 		 * media, so it might have some other, undocumented meaning.
   8439 		 */
   8440 
   8441 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8442 		    && ((sc->sc_type < WM_T_82574)
   8443 			|| (sc->sc_type == WM_T_80003))) {
   8444 			ifp->if_oerrors++;
   8445 			if (status & WTX_ST_LC)
   8446 				log(LOG_WARNING, "%s: late collision\n",
   8447 				    device_xname(sc->sc_dev));
   8448 			else if (status & WTX_ST_EC) {
   8449 				ifp->if_collisions +=
   8450 				    TX_COLLISION_THRESHOLD + 1;
   8451 				log(LOG_WARNING, "%s: excessive collisions\n",
   8452 				    device_xname(sc->sc_dev));
   8453 			}
   8454 		} else
   8455 			ifp->if_opackets++;
   8456 
   8457 		txq->txq_packets++;
   8458 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8459 
   8460 		txq->txq_free += txs->txs_ndesc;
   8461 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8462 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8463 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8464 		m_freem(txs->txs_mbuf);
   8465 		txs->txs_mbuf = NULL;
   8466 	}
   8467 
   8468 	/* Update the dirty transmit buffer pointer. */
   8469 	txq->txq_sdirty = i;
   8470 	DPRINTF(WM_DEBUG_TX,
   8471 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8472 
   8473 	if (count != 0)
   8474 		rnd_add_uint32(&sc->rnd_source, count);
   8475 
   8476 	/*
   8477 	 * If there are no more pending transmissions, cancel the watchdog
   8478 	 * timer.
   8479 	 */
   8480 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8481 		txq->txq_sending = false;
   8482 
   8483 	return more;
   8484 }
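/*
 * Note the contract of wm_txeof(): it reclaims at most "limit" jobs and
 * returns true when it stopped early, so callers can defer the rest
 * instead of monopolizing interrupt context.  A caller sketch (this is
 * the shape used by wm_txrxintr_msix() and wm_handle_queue() below):
 *
 *	more = wm_txeof(txq, limit);
 *	...
 *	if (more)
 *		softint_schedule(wmq->wmq_si);
 *	else
 *		wm_txrxintr_enable(wmq);
 */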
   8485 
   8486 static inline uint32_t
   8487 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8488 {
   8489 	struct wm_softc *sc = rxq->rxq_sc;
   8490 
   8491 	if (sc->sc_type == WM_T_82574)
   8492 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8493 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8494 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8495 	else
   8496 		return rxq->rxq_descs[idx].wrx_status;
   8497 }
   8498 
   8499 static inline uint32_t
   8500 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8501 {
   8502 	struct wm_softc *sc = rxq->rxq_sc;
   8503 
   8504 	if (sc->sc_type == WM_T_82574)
   8505 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8506 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8507 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8508 	else
   8509 		return rxq->rxq_descs[idx].wrx_errors;
   8510 }
   8511 
   8512 static inline uint16_t
   8513 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8514 {
   8515 	struct wm_softc *sc = rxq->rxq_sc;
   8516 
   8517 	if (sc->sc_type == WM_T_82574)
   8518 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8519 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8520 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8521 	else
   8522 		return rxq->rxq_descs[idx].wrx_special;
   8523 }
   8524 
   8525 static inline int
   8526 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8527 {
   8528 	struct wm_softc *sc = rxq->rxq_sc;
   8529 
   8530 	if (sc->sc_type == WM_T_82574)
   8531 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8532 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8533 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8534 	else
   8535 		return rxq->rxq_descs[idx].wrx_len;
   8536 }
   8537 
   8538 #ifdef WM_DEBUG
   8539 static inline uint32_t
   8540 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8541 {
   8542 	struct wm_softc *sc = rxq->rxq_sc;
   8543 
   8544 	if (sc->sc_type == WM_T_82574)
   8545 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8546 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8547 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8548 	else
   8549 		return 0;
   8550 }
   8551 
   8552 static inline uint8_t
   8553 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8554 {
   8555 	struct wm_softc *sc = rxq->rxq_sc;
   8556 
   8557 	if (sc->sc_type == WM_T_82574)
   8558 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8559 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8560 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8561 	else
   8562 		return 0;
   8563 }
   8564 #endif /* WM_DEBUG */
   8565 
   8566 static inline bool
   8567 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8568     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8569 {
   8570 
   8571 	if (sc->sc_type == WM_T_82574)
   8572 		return (status & ext_bit) != 0;
   8573 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8574 		return (status & nq_bit) != 0;
   8575 	else
   8576 		return (status & legacy_bit) != 0;
   8577 }
   8578 
   8579 static inline bool
   8580 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8581     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8582 {
   8583 
   8584 	if (sc->sc_type == WM_T_82574)
   8585 		return (error & ext_bit) != 0;
   8586 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8587 		return (error & nq_bit) != 0;
   8588 	else
   8589 		return (error & legacy_bit) != 0;
   8590 }
   8591 
   8592 static inline bool
   8593 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8594 {
   8595 
   8596 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8597 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8598 		return true;
   8599 	else
   8600 		return false;
   8601 }
   8602 
   8603 static inline bool
   8604 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8605 {
   8606 	struct wm_softc *sc = rxq->rxq_sc;
   8607 
   8608 	/* XXXX missing error bit for newqueue? */
   8609 	if (wm_rxdesc_is_set_error(sc, errors,
   8610 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8611 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8612 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8613 		NQRXC_ERROR_RXE)) {
   8614 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8615 		    EXTRXC_ERROR_SE, 0))
   8616 			log(LOG_WARNING, "%s: symbol error\n",
   8617 			    device_xname(sc->sc_dev));
   8618 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8619 		    EXTRXC_ERROR_SEQ, 0))
   8620 			log(LOG_WARNING, "%s: receive sequence error\n",
   8621 			    device_xname(sc->sc_dev));
   8622 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8623 		    EXTRXC_ERROR_CE, 0))
   8624 			log(LOG_WARNING, "%s: CRC error\n",
   8625 			    device_xname(sc->sc_dev));
   8626 		return true;
   8627 	}
   8628 
   8629 	return false;
   8630 }
   8631 
   8632 static inline bool
   8633 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8634 {
   8635 	struct wm_softc *sc = rxq->rxq_sc;
   8636 
   8637 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8638 		NQRXC_STATUS_DD)) {
   8639 		/* We have processed all of the receive descriptors. */
   8640 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8641 		return false;
   8642 	}
   8643 
   8644 	return true;
   8645 }
   8646 
   8647 static inline bool
   8648 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8649     uint16_t vlantag, struct mbuf *m)
   8650 {
   8651 
   8652 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8653 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8654 		vlan_set_tag(m, le16toh(vlantag));
   8655 	}
   8656 
   8657 	return true;
   8658 }
   8659 
   8660 static inline void
   8661 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8662     uint32_t errors, struct mbuf *m)
   8663 {
   8664 	struct wm_softc *sc = rxq->rxq_sc;
   8665 
   8666 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8667 		if (wm_rxdesc_is_set_status(sc, status,
   8668 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8669 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8670 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8671 			if (wm_rxdesc_is_set_error(sc, errors,
   8672 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8673 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8674 		}
   8675 		if (wm_rxdesc_is_set_status(sc, status,
   8676 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8677 			/*
   8678 			 * Note: we don't know if this was TCP or UDP,
   8679 			 * so we just set both bits, and expect the
   8680 			 * upper layers to deal.
   8681 			 */
   8682 			WM_Q_EVCNT_INCR(rxq, tusum);
   8683 			m->m_pkthdr.csum_flags |=
   8684 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8685 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8686 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8687 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8688 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8689 		}
   8690 	}
   8691 }
   8692 
   8693 /*
   8694  * wm_rxeof:
   8695  *
   8696  *	Helper; handle receive interrupts.
   8697  */
   8698 static bool
   8699 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8700 {
   8701 	struct wm_softc *sc = rxq->rxq_sc;
   8702 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8703 	struct wm_rxsoft *rxs;
   8704 	struct mbuf *m;
   8705 	int i, len;
   8706 	int count = 0;
   8707 	uint32_t status, errors;
   8708 	uint16_t vlantag;
   8709 	bool more = false;
   8710 
   8711 	KASSERT(mutex_owned(rxq->rxq_lock));
   8712 
   8713 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8714 		if (limit-- == 0) {
   8715 			rxq->rxq_ptr = i;
   8716 			more = true;
   8717 			DPRINTF(WM_DEBUG_RX,
   8718 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8719 				device_xname(sc->sc_dev), i));
   8720 			break;
   8721 		}
   8722 
   8723 		rxs = &rxq->rxq_soft[i];
   8724 
   8725 		DPRINTF(WM_DEBUG_RX,
   8726 		    ("%s: RX: checking descriptor %d\n",
   8727 			device_xname(sc->sc_dev), i));
   8728 		wm_cdrxsync(rxq, i,
   8729 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8730 
   8731 		status = wm_rxdesc_get_status(rxq, i);
   8732 		errors = wm_rxdesc_get_errors(rxq, i);
   8733 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8734 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8735 #ifdef WM_DEBUG
   8736 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8737 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8738 #endif
   8739 
   8740 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8741 			/*
   8742 			 * Update the receive pointer while still holding
   8743 			 * rxq_lock, consistent with the counter updates.
   8744 			 */
   8745 			rxq->rxq_ptr = i;
   8746 			break;
   8747 		}
   8748 
   8749 		count++;
   8750 		if (__predict_false(rxq->rxq_discard)) {
   8751 			DPRINTF(WM_DEBUG_RX,
   8752 			    ("%s: RX: discarding contents of descriptor %d\n",
   8753 				device_xname(sc->sc_dev), i));
   8754 			wm_init_rxdesc(rxq, i);
   8755 			if (wm_rxdesc_is_eop(rxq, status)) {
   8756 				/* Reset our state. */
   8757 				DPRINTF(WM_DEBUG_RX,
   8758 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8759 					device_xname(sc->sc_dev)));
   8760 				rxq->rxq_discard = 0;
   8761 			}
   8762 			continue;
   8763 		}
   8764 
   8765 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8766 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8767 
   8768 		m = rxs->rxs_mbuf;
   8769 
   8770 		/*
   8771 		 * Add a new receive buffer to the ring, unless of
   8772 		 * course the length is zero. Treat the latter as a
   8773 		 * failed mapping.
   8774 		 */
   8775 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8776 			/*
   8777 			 * Failed, throw away what we've done so
   8778 			 * far, and discard the rest of the packet.
   8779 			 */
   8780 			ifp->if_ierrors++;
   8781 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8782 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8783 			wm_init_rxdesc(rxq, i);
   8784 			if (!wm_rxdesc_is_eop(rxq, status))
   8785 				rxq->rxq_discard = 1;
   8786 			if (rxq->rxq_head != NULL)
   8787 				m_freem(rxq->rxq_head);
   8788 			WM_RXCHAIN_RESET(rxq);
   8789 			DPRINTF(WM_DEBUG_RX,
   8790 			    ("%s: RX: Rx buffer allocation failed, "
   8791 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8792 				rxq->rxq_discard ? " (discard)" : ""));
   8793 			continue;
   8794 		}
   8795 
   8796 		m->m_len = len;
   8797 		rxq->rxq_len += len;
   8798 		DPRINTF(WM_DEBUG_RX,
   8799 		    ("%s: RX: buffer at %p len %d\n",
   8800 			device_xname(sc->sc_dev), m->m_data, len));
   8801 
   8802 		/* If this is not the end of the packet, keep looking. */
   8803 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8804 			WM_RXCHAIN_LINK(rxq, m);
   8805 			DPRINTF(WM_DEBUG_RX,
   8806 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8807 				device_xname(sc->sc_dev), rxq->rxq_len));
   8808 			continue;
   8809 		}
   8810 
   8811 		/*
   8812 		 * Okay, we have the entire packet now. The chip is
   8813 		 * configured to include the FCS except on the I350, I354,
   8814 		 * I210 and I211 (not all chips can be configured to strip
   8815 		 * it), so we need to trim it. We may also need to shorten
   8816 		 * the previous mbuf in the chain when the current mbuf is
   8817 		 * too short, e.g. when the last mbuf holds only 2 of the 4
   8818 		 * FCS bytes. Due to an erratum, RCTL_SECRC is always set in
   8819 		 * RCTL on the I350, so we don't trim it there.
   8820 		 */
   8821 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8822 		    && (sc->sc_type != WM_T_I210)
   8823 		    && (sc->sc_type != WM_T_I211)) {
   8824 			if (m->m_len < ETHER_CRC_LEN) {
   8825 				rxq->rxq_tail->m_len
   8826 				    -= (ETHER_CRC_LEN - m->m_len);
   8827 				m->m_len = 0;
   8828 			} else
   8829 				m->m_len -= ETHER_CRC_LEN;
   8830 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8831 		} else
   8832 			len = rxq->rxq_len;
   8833 
   8834 		WM_RXCHAIN_LINK(rxq, m);
   8835 
   8836 		*rxq->rxq_tailp = NULL;
   8837 		m = rxq->rxq_head;
   8838 
   8839 		WM_RXCHAIN_RESET(rxq);
   8840 
   8841 		DPRINTF(WM_DEBUG_RX,
   8842 		    ("%s: RX: have entire packet, len -> %d\n",
   8843 			device_xname(sc->sc_dev), len));
   8844 
   8845 		/* If an error occurred, update stats and drop the packet. */
   8846 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8847 			m_freem(m);
   8848 			continue;
   8849 		}
   8850 
   8851 		/* No errors.  Receive the packet. */
   8852 		m_set_rcvif(m, ifp);
   8853 		m->m_pkthdr.len = len;
   8854 		/*
   8855 		 * TODO
   8856 		 * we should save the rsshash and rsstype in this mbuf.
   8857 		 */
   8858 		DPRINTF(WM_DEBUG_RX,
   8859 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8860 			device_xname(sc->sc_dev), rsstype, rsshash));
   8861 
   8862 		/*
   8863 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8864 		 * for us.  Associate the tag with the packet.
   8865 		 */
   8866 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8867 			continue;
   8868 
   8869 		/* Set up checksum info for this packet. */
   8870 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8871 		/*
   8872 		 * Update the receive pointer while still holding rxq_lock,
   8873 		 * consistent with the counter updates.
   8874 		 */
   8875 		rxq->rxq_ptr = i;
   8876 		rxq->rxq_packets++;
   8877 		rxq->rxq_bytes += len;
   8878 		mutex_exit(rxq->rxq_lock);
   8879 
   8880 		/* Pass it on. */
   8881 		if_percpuq_enqueue(sc->sc_ipq, m);
   8882 
   8883 		mutex_enter(rxq->rxq_lock);
   8884 
   8885 		if (rxq->rxq_stopping)
   8886 			break;
   8887 	}
   8888 
   8889 	if (count != 0)
   8890 		rnd_add_uint32(&sc->rnd_source, count);
   8891 
   8892 	DPRINTF(WM_DEBUG_RX,
   8893 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8894 
   8895 	return more;
   8896 }
   8897 
   8898 /*
   8899  * wm_linkintr_gmii:
   8900  *
   8901  *	Helper; handle link interrupts for GMII.
   8902  */
   8903 static void
   8904 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8905 {
   8906 
   8907 	KASSERT(WM_CORE_LOCKED(sc));
   8908 
   8909 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8910 		__func__));
   8911 
   8912 	if (icr & ICR_LSC) {
   8913 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8914 		uint32_t reg;
   8915 		bool link;
   8916 
   8917 		link = status & STATUS_LU;
   8918 		if (link) {
   8919 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8920 				device_xname(sc->sc_dev),
   8921 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8922 		} else {
   8923 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8924 				device_xname(sc->sc_dev)));
   8925 		}
   8926 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8927 			wm_gig_downshift_workaround_ich8lan(sc);
   8928 
   8929 		if ((sc->sc_type == WM_T_ICH8)
   8930 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8931 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8932 		}
   8933 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8934 			device_xname(sc->sc_dev)));
   8935 		mii_pollstat(&sc->sc_mii);
   8936 		if (sc->sc_type == WM_T_82543) {
   8937 			int miistatus, active;
   8938 
   8939 			/*
   8940 			 * With 82543, we need to force speed and
   8941 			 * duplex on the MAC equal to what the PHY
   8942 			 * speed and duplex configuration is.
   8943 			 */
   8944 			miistatus = sc->sc_mii.mii_media_status;
   8945 
   8946 			if (miistatus & IFM_ACTIVE) {
   8947 				active = sc->sc_mii.mii_media_active;
   8948 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8949 				switch (IFM_SUBTYPE(active)) {
   8950 				case IFM_10_T:
   8951 					sc->sc_ctrl |= CTRL_SPEED_10;
   8952 					break;
   8953 				case IFM_100_TX:
   8954 					sc->sc_ctrl |= CTRL_SPEED_100;
   8955 					break;
   8956 				case IFM_1000_T:
   8957 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8958 					break;
   8959 				default:
   8960 					/*
   8961 					 * fiber?
   8962 					 * Should not enter here.
   8963 					 */
   8964 					printf("unknown media (%x)\n", active);
   8965 					break;
   8966 				}
   8967 				if (active & IFM_FDX)
   8968 					sc->sc_ctrl |= CTRL_FD;
   8969 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8970 			}
   8971 		} else if (sc->sc_type == WM_T_PCH) {
   8972 			wm_k1_gig_workaround_hv(sc,
   8973 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8974 		}
   8975 
   8976 		/*
   8977 		 * I217 Packet Loss issue:
   8978 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8979 		 * on power up.
   8980 		 * Set the Beacon Duration for I217 to 8 usec
   8981 		 */
   8982 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8983 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8984 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8985 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8986 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8987 		}
   8988 
   8989 		/* Work-around I218 hang issue */
   8990 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8991 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8992 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8993 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8994 			wm_k1_workaround_lpt_lp(sc, link);
   8995 
   8996 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8997 			/*
   8998 			 * Set platform power management values for Latency
   8999 			 * Tolerance Reporting (LTR)
   9000 			 */
   9001 			wm_platform_pm_pch_lpt(sc,
   9002 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9003 		}
   9004 
   9005 		/* FEXTNVM6 K1-off workaround */
   9006 		if (sc->sc_type == WM_T_PCH_SPT) {
   9007 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9008 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   9009 			    & FEXTNVM6_K1_OFF_ENABLE)
   9010 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   9011 			else
   9012 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9013 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9014 		}
   9015 
   9016 		if (!link)
   9017 			return;
   9018 
   9019 		switch (sc->sc_type) {
   9020 		case WM_T_PCH2:
   9021 			wm_k1_workaround_lv(sc);
   9022 			/* FALLTHROUGH */
   9023 		case WM_T_PCH:
   9024 			if (sc->sc_phytype == WMPHY_82578)
   9025 				wm_link_stall_workaround_hv(sc);
   9026 			break;
   9027 		default:
   9028 			break;
   9029 		}
   9030 	} else if (icr & ICR_RXSEQ) {
   9031 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   9032 			device_xname(sc->sc_dev)));
   9033 	}
   9034 }
   9035 
   9036 /*
   9037  * wm_linkintr_tbi:
   9038  *
   9039  *	Helper; handle link interrupts for TBI mode.
   9040  */
   9041 static void
   9042 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9043 {
   9044 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9045 	uint32_t status;
   9046 
   9047 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9048 		__func__));
   9049 
   9050 	status = CSR_READ(sc, WMREG_STATUS);
   9051 	if (icr & ICR_LSC) {
   9052 		wm_check_for_link(sc);
   9053 		if (status & STATUS_LU) {
   9054 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9055 				device_xname(sc->sc_dev),
   9056 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9057 			/*
   9058 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   9059 			 * automatically, so we should update sc->sc_ctrl.
   9060 			 */
   9061 
   9062 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9063 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9064 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9065 			if (status & STATUS_FD)
   9066 				sc->sc_tctl |=
   9067 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9068 			else
   9069 				sc->sc_tctl |=
   9070 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9071 			if (sc->sc_ctrl & CTRL_TFCE)
   9072 				sc->sc_fcrtl |= FCRTL_XONE;
   9073 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9074 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9075 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9076 			sc->sc_tbi_linkup = 1;
   9077 			if_link_state_change(ifp, LINK_STATE_UP);
   9078 		} else {
   9079 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9080 				device_xname(sc->sc_dev)));
   9081 			sc->sc_tbi_linkup = 0;
   9082 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9083 		}
   9084 		/* Update LED */
   9085 		wm_tbi_serdes_set_linkled(sc);
   9086 	} else if (icr & ICR_RXSEQ) {
   9087 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9088 			device_xname(sc->sc_dev)));
   9089 	}
   9090 }
   9091 
   9092 /*
   9093  * wm_linkintr_serdes:
   9094  *
   9095  *	Helper; handle link interrupts for SERDES mode.
   9096  */
   9097 static void
   9098 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9099 {
   9100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9101 	struct mii_data *mii = &sc->sc_mii;
   9102 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9103 	uint32_t pcs_adv, pcs_lpab, reg;
   9104 
   9105 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9106 		__func__));
   9107 
   9108 	if (icr & ICR_LSC) {
   9109 		/* Check PCS */
   9110 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9111 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9112 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9113 				device_xname(sc->sc_dev)));
   9114 			mii->mii_media_status |= IFM_ACTIVE;
   9115 			sc->sc_tbi_linkup = 1;
   9116 			if_link_state_change(ifp, LINK_STATE_UP);
   9117 		} else {
   9118 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9119 				device_xname(sc->sc_dev)));
   9120 			mii->mii_media_status |= IFM_NONE;
   9121 			sc->sc_tbi_linkup = 0;
   9122 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9123 			wm_tbi_serdes_set_linkled(sc);
   9124 			return;
   9125 		}
   9126 		mii->mii_media_active |= IFM_1000_SX;
   9127 		if ((reg & PCS_LSTS_FDX) != 0)
   9128 			mii->mii_media_active |= IFM_FDX;
   9129 		else
   9130 			mii->mii_media_active |= IFM_HDX;
   9131 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9132 			/* Check flow */
   9133 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9134 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9135 				DPRINTF(WM_DEBUG_LINK,
   9136 				    ("XXX LINKOK but not ACOMP\n"));
   9137 				return;
   9138 			}
   9139 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9140 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9141 			DPRINTF(WM_DEBUG_LINK,
   9142 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9143 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9144 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9145 				mii->mii_media_active |= IFM_FLOW
   9146 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9147 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9148 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9149 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9150 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9151 				mii->mii_media_active |= IFM_FLOW
   9152 				    | IFM_ETH_TXPAUSE;
   9153 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9154 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9155 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9156 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9157 				mii->mii_media_active |= IFM_FLOW
   9158 				    | IFM_ETH_RXPAUSE;
   9159 		}
   9160 		/* Update LED */
   9161 		wm_tbi_serdes_set_linkled(sc);
   9162 	} else {
   9163 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9164 		    device_xname(sc->sc_dev)));
   9165 	}
   9166 }
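/*
 * For reference, the pause resolution above follows the usual IEEE 802.3
 * Annex 28B priority rules (Sym = TXCW_SYM_PAUSE, Asym = TXCW_ASYM_PAUSE,
 * "-" = don't care):
 *
 *	local Sym/Asym	partner Sym/Asym	resolved flow control
 *	   1 / -	   1 / -		TX and RX pause
 *	   0 / 1	   1 / 1		TX pause only
 *	   1 / 1	   0 / 1		RX pause only
 *	 otherwise				none
 */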
   9167 
   9168 /*
   9169  * wm_linkintr:
   9170  *
   9171  *	Helper; handle link interrupts.
   9172  */
   9173 static void
   9174 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9175 {
   9176 
   9177 	KASSERT(WM_CORE_LOCKED(sc));
   9178 
   9179 	if (sc->sc_flags & WM_F_HAS_MII)
   9180 		wm_linkintr_gmii(sc, icr);
   9181 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9182 	    && (sc->sc_type >= WM_T_82575))
   9183 		wm_linkintr_serdes(sc, icr);
   9184 	else
   9185 		wm_linkintr_tbi(sc, icr);
   9186 }
   9187 
   9188 /*
   9189  * wm_intr_legacy:
   9190  *
   9191  *	Interrupt service routine for INTx and MSI.
   9192  */
   9193 static int
   9194 wm_intr_legacy(void *arg)
   9195 {
   9196 	struct wm_softc *sc = arg;
   9197 	struct wm_queue *wmq = &sc->sc_queue[0];
   9198 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9199 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9200 	uint32_t icr, rndval = 0;
   9201 	int handled = 0;
   9202 
   9203 	while (1 /* CONSTCOND */) {
   9204 		icr = CSR_READ(sc, WMREG_ICR);
   9205 		if ((icr & sc->sc_icr) == 0)
   9206 			break;
   9207 		if (handled == 0) {
   9208 			DPRINTF(WM_DEBUG_TX,
   9209 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9210 		}
   9211 		if (rndval == 0)
   9212 			rndval = icr;
   9213 
   9214 		mutex_enter(rxq->rxq_lock);
   9215 
   9216 		if (rxq->rxq_stopping) {
   9217 			mutex_exit(rxq->rxq_lock);
   9218 			break;
   9219 		}
   9220 
   9221 		handled = 1;
   9222 
   9223 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9224 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9225 			DPRINTF(WM_DEBUG_RX,
   9226 			    ("%s: RX: got Rx intr 0x%08x\n",
   9227 				device_xname(sc->sc_dev),
   9228 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9229 			WM_Q_EVCNT_INCR(rxq, intr);
   9230 		}
   9231 #endif
   9232 		/*
   9233 		 * wm_rxeof() does *not* call upper layer functions directly,
   9234 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9235 		 * So we can call wm_rxeof() in interrupt context.
   9236 		 */
   9237 		wm_rxeof(rxq, UINT_MAX);
   9238 
   9239 		mutex_exit(rxq->rxq_lock);
   9240 		mutex_enter(txq->txq_lock);
   9241 
   9242 		if (txq->txq_stopping) {
   9243 			mutex_exit(txq->txq_lock);
   9244 			break;
   9245 		}
   9246 
   9247 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9248 		if (icr & ICR_TXDW) {
   9249 			DPRINTF(WM_DEBUG_TX,
   9250 			    ("%s: TX: got TXDW interrupt\n",
   9251 				device_xname(sc->sc_dev)));
   9252 			WM_Q_EVCNT_INCR(txq, txdw);
   9253 		}
   9254 #endif
   9255 		wm_txeof(txq, UINT_MAX);
   9256 
   9257 		mutex_exit(txq->txq_lock);
   9258 		WM_CORE_LOCK(sc);
   9259 
   9260 		if (sc->sc_core_stopping) {
   9261 			WM_CORE_UNLOCK(sc);
   9262 			break;
   9263 		}
   9264 
   9265 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9266 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9267 			wm_linkintr(sc, icr);
   9268 		}
   9269 
   9270 		WM_CORE_UNLOCK(sc);
   9271 
   9272 		if (icr & ICR_RXO) {
   9273 #if defined(WM_DEBUG)
   9274 			log(LOG_WARNING, "%s: Receive overrun\n",
   9275 			    device_xname(sc->sc_dev));
   9276 #endif /* defined(WM_DEBUG) */
   9277 		}
   9278 	}
   9279 
   9280 	rnd_add_uint32(&sc->rnd_source, rndval);
   9281 
   9282 	if (handled) {
   9283 		/* Try to get more packets going. */
   9284 		softint_schedule(wmq->wmq_si);
   9285 	}
   9286 
   9287 	return handled;
   9288 }
   9289 
   9290 static inline void
   9291 wm_txrxintr_disable(struct wm_queue *wmq)
   9292 {
   9293 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9294 
   9295 	if (sc->sc_type == WM_T_82574)
   9296 		CSR_WRITE(sc, WMREG_IMC,
   9297 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9298 	else if (sc->sc_type == WM_T_82575)
   9299 		CSR_WRITE(sc, WMREG_EIMC,
   9300 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9301 	else
   9302 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9303 }
   9304 
   9305 static inline void
   9306 wm_txrxintr_enable(struct wm_queue *wmq)
   9307 {
   9308 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9309 
   9310 	wm_itrs_calculate(sc, wmq);
   9311 
   9312 	/*
   9313 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is enabled
   9314 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
   9315 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
   9316 	 * while its wm_handle_queue(wmq) is running.
   9317 	 */
   9318 	if (sc->sc_type == WM_T_82574)
   9319 		CSR_WRITE(sc, WMREG_IMS,
   9320 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9321 	else if (sc->sc_type == WM_T_82575)
   9322 		CSR_WRITE(sc, WMREG_EIMS,
   9323 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9324 	else
   9325 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9326 }
   9327 
   9328 static int
   9329 wm_txrxintr_msix(void *arg)
   9330 {
   9331 	struct wm_queue *wmq = arg;
   9332 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9333 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9334 	struct wm_softc *sc = txq->txq_sc;
   9335 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9336 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9337 	bool txmore;
   9338 	bool rxmore;
   9339 
   9340 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9341 
   9342 	DPRINTF(WM_DEBUG_TX,
   9343 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9344 
   9345 	wm_txrxintr_disable(wmq);
   9346 
   9347 	mutex_enter(txq->txq_lock);
   9348 
   9349 	if (txq->txq_stopping) {
   9350 		mutex_exit(txq->txq_lock);
   9351 		return 0;
   9352 	}
   9353 
   9354 	WM_Q_EVCNT_INCR(txq, txdw);
   9355 	txmore = wm_txeof(txq, txlimit);
   9356 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9357 	mutex_exit(txq->txq_lock);
   9358 
   9359 	DPRINTF(WM_DEBUG_RX,
   9360 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9361 	mutex_enter(rxq->rxq_lock);
   9362 
   9363 	if (rxq->rxq_stopping) {
   9364 		mutex_exit(rxq->rxq_lock);
   9365 		return 0;
   9366 	}
   9367 
   9368 	WM_Q_EVCNT_INCR(rxq, intr);
   9369 	rxmore = wm_rxeof(rxq, rxlimit);
   9370 	mutex_exit(rxq->rxq_lock);
   9371 
   9372 	wm_itrs_writereg(sc, wmq);
   9373 
   9374 	if (txmore || rxmore)
   9375 		softint_schedule(wmq->wmq_si);
   9376 	else
   9377 		wm_txrxintr_enable(wmq);
   9378 
   9379 	return 1;
   9380 }
   9381 
   9382 static void
   9383 wm_handle_queue(void *arg)
   9384 {
   9385 	struct wm_queue *wmq = arg;
   9386 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9387 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9388 	struct wm_softc *sc = txq->txq_sc;
   9389 	u_int txlimit = sc->sc_tx_process_limit;
   9390 	u_int rxlimit = sc->sc_rx_process_limit;
   9391 	bool txmore;
   9392 	bool rxmore;
   9393 
   9394 	mutex_enter(txq->txq_lock);
   9395 	if (txq->txq_stopping) {
   9396 		mutex_exit(txq->txq_lock);
   9397 		return;
   9398 	}
   9399 	txmore = wm_txeof(txq, txlimit);
   9400 	wm_deferred_start_locked(txq);
   9401 	mutex_exit(txq->txq_lock);
   9402 
   9403 	mutex_enter(rxq->rxq_lock);
   9404 	if (rxq->rxq_stopping) {
   9405 		mutex_exit(rxq->rxq_lock);
   9406 		return;
   9407 	}
   9408 	WM_Q_EVCNT_INCR(rxq, defer);
   9409 	rxmore = wm_rxeof(rxq, rxlimit);
   9410 	mutex_exit(rxq->rxq_lock);
   9411 
   9412 	if (txmore || rxmore)
   9413 		softint_schedule(wmq->wmq_si);
   9414 	else
   9415 		wm_txrxintr_enable(wmq);
   9416 }
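/*
 * Taken together, wm_txrxintr_msix() and wm_handle_queue() implement a
 * bounded-work scheme: the hard interrupt masks the queue's vector,
 * processes up to sc_tx_intr_process_limit/sc_rx_intr_process_limit
 * entries, and defers the remainder to this softint, which processes up
 * to sc_tx_process_limit/sc_rx_process_limit entries per pass.  The
 * vector is unmasked only when a pass completes with no work left, so
 * under sustained load the driver degrades gracefully toward polling.
 */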
   9417 
   9418 /*
   9419  * wm_linkintr_msix:
   9420  *
   9421  *	Interrupt service routine for link status change for MSI-X.
   9422  */
   9423 static int
   9424 wm_linkintr_msix(void *arg)
   9425 {
   9426 	struct wm_softc *sc = arg;
   9427 	uint32_t reg;
   9428 	bool has_rxo;
   9429 
   9430 	DPRINTF(WM_DEBUG_LINK,
   9431 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9432 
   9433 	reg = CSR_READ(sc, WMREG_ICR);
   9434 	WM_CORE_LOCK(sc);
   9435 	if (sc->sc_core_stopping)
   9436 		goto out;
   9437 
   9438 	if ((reg & ICR_LSC) != 0) {
   9439 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9440 		wm_linkintr(sc, ICR_LSC);
   9441 	}
   9442 
   9443 	/*
   9444 	 * XXX 82574 MSI-X mode workaround
   9445 	 *
   9446 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
   9447 	 * on the ICR_OTHER MSI-X vector, and raises neither the ICR_RXQ(0)
   9448 	 * nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and
   9449 	 * ICR_RXQ(1) interrupts by writing WMREG_ICS to process the packets.
   9450 	 */
   9451 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9452 #if defined(WM_DEBUG)
   9453 		log(LOG_WARNING, "%s: Receive overrun\n",
   9454 		    device_xname(sc->sc_dev));
   9455 #endif /* defined(WM_DEBUG) */
   9456 
   9457 		has_rxo = true;
   9458 		/*
   9459 		 * The RXO interrupt rate is very high when the receive
   9460 		 * traffic rate is high, so we use polling mode for
   9461 		 * ICR_OTHER, as for the Tx/Rx interrupts. ICR_OTHER will
   9462 		 * be re-enabled at the end of wm_txrxintr_msix(), which is
   9463 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9464 		 */
   9465 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9466 
   9467 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9468 	}
   9469 
   9472 out:
   9473 	WM_CORE_UNLOCK(sc);
   9474 
   9475 	if (sc->sc_type == WM_T_82574) {
   9476 		if (!has_rxo)
   9477 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9478 		else
   9479 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9480 	} else if (sc->sc_type == WM_T_82575)
   9481 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9482 	else
   9483 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9484 
   9485 	return 1;
   9486 }
   9487 
   9488 /*
   9489  * Media related.
   9490  * GMII, SGMII, TBI (and SERDES)
   9491  */
   9492 
   9493 /* Common */
   9494 
   9495 /*
   9496  * wm_tbi_serdes_set_linkled:
   9497  *
   9498  *	Update the link LED on TBI and SERDES devices.
   9499  */
   9500 static void
   9501 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9502 {
   9503 
   9504 	if (sc->sc_tbi_linkup)
   9505 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9506 	else
   9507 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9508 
   9509 	/* 82540 or newer devices are active low */
   9510 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9511 
   9512 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9513 }
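/*
 * Worked example of the XOR above, for an active-low (82540 or newer)
 * device: with link up, the first assignment sets CTRL_SWDPIN(0) and the
 * XOR clears it again, so the pin is driven low and the LED is asserted;
 * with link down, the bit is first cleared and the XOR then sets it,
 * driving the pin high and turning the LED off.
 */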
   9514 
   9515 /* GMII related */
   9516 
   9517 /*
   9518  * wm_gmii_reset:
   9519  *
   9520  *	Reset the PHY.
   9521  */
   9522 static void
   9523 wm_gmii_reset(struct wm_softc *sc)
   9524 {
   9525 	uint32_t reg;
   9526 	int rv;
   9527 
   9528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9529 		device_xname(sc->sc_dev), __func__));
   9530 
   9531 	rv = sc->phy.acquire(sc);
   9532 	if (rv != 0) {
   9533 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9534 		    __func__);
   9535 		return;
   9536 	}
   9537 
   9538 	switch (sc->sc_type) {
   9539 	case WM_T_82542_2_0:
   9540 	case WM_T_82542_2_1:
   9541 		/* null */
   9542 		break;
   9543 	case WM_T_82543:
   9544 		/*
    9545 		 * With the 82543, we need to force the MAC's speed and duplex
    9546 		 * settings to match the PHY's configuration. In addition, we
    9547 		 * need to pulse the PHY's hardware reset pin to take the PHY
    9548 		 * out of reset.
   9549 		 */
   9550 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9551 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9552 
   9553 		/* The PHY reset pin is active-low. */
   9554 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9555 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9556 		    CTRL_EXT_SWDPIN(4));
   9557 		reg |= CTRL_EXT_SWDPIO(4);
   9558 
   9559 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9560 		CSR_WRITE_FLUSH(sc);
   9561 		delay(10*1000);
   9562 
   9563 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9564 		CSR_WRITE_FLUSH(sc);
   9565 		delay(150);
   9566 #if 0
   9567 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9568 #endif
   9569 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9570 		break;
   9571 	case WM_T_82544:	/* reset 10000us */
   9572 	case WM_T_82540:
   9573 	case WM_T_82545:
   9574 	case WM_T_82545_3:
   9575 	case WM_T_82546:
   9576 	case WM_T_82546_3:
   9577 	case WM_T_82541:
   9578 	case WM_T_82541_2:
   9579 	case WM_T_82547:
   9580 	case WM_T_82547_2:
   9581 	case WM_T_82571:	/* reset 100us */
   9582 	case WM_T_82572:
   9583 	case WM_T_82573:
   9584 	case WM_T_82574:
   9585 	case WM_T_82575:
   9586 	case WM_T_82576:
   9587 	case WM_T_82580:
   9588 	case WM_T_I350:
   9589 	case WM_T_I354:
   9590 	case WM_T_I210:
   9591 	case WM_T_I211:
   9592 	case WM_T_82583:
   9593 	case WM_T_80003:
   9594 		/* generic reset */
   9595 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9596 		CSR_WRITE_FLUSH(sc);
   9597 		delay(20000);
   9598 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9599 		CSR_WRITE_FLUSH(sc);
   9600 		delay(20000);
   9601 
   9602 		if ((sc->sc_type == WM_T_82541)
   9603 		    || (sc->sc_type == WM_T_82541_2)
   9604 		    || (sc->sc_type == WM_T_82547)
   9605 		    || (sc->sc_type == WM_T_82547_2)) {
    9606 			/* Workarounds for IGP are done in igp_reset() */
   9607 			/* XXX add code to set LED after phy reset */
   9608 		}
   9609 		break;
   9610 	case WM_T_ICH8:
   9611 	case WM_T_ICH9:
   9612 	case WM_T_ICH10:
   9613 	case WM_T_PCH:
   9614 	case WM_T_PCH2:
   9615 	case WM_T_PCH_LPT:
   9616 	case WM_T_PCH_SPT:
   9617 	case WM_T_PCH_CNP:
   9618 		/* generic reset */
   9619 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9620 		CSR_WRITE_FLUSH(sc);
   9621 		delay(100);
   9622 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9623 		CSR_WRITE_FLUSH(sc);
   9624 		delay(150);
   9625 		break;
   9626 	default:
   9627 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9628 		    __func__);
   9629 		break;
   9630 	}
   9631 
   9632 	sc->phy.release(sc);
   9633 
   9634 	/* get_cfg_done */
   9635 	wm_get_cfg_done(sc);
   9636 
   9637 	/* extra setup */
   9638 	switch (sc->sc_type) {
   9639 	case WM_T_82542_2_0:
   9640 	case WM_T_82542_2_1:
   9641 	case WM_T_82543:
   9642 	case WM_T_82544:
   9643 	case WM_T_82540:
   9644 	case WM_T_82545:
   9645 	case WM_T_82545_3:
   9646 	case WM_T_82546:
   9647 	case WM_T_82546_3:
   9648 	case WM_T_82541_2:
   9649 	case WM_T_82547_2:
   9650 	case WM_T_82571:
   9651 	case WM_T_82572:
   9652 	case WM_T_82573:
   9653 	case WM_T_82574:
   9654 	case WM_T_82583:
   9655 	case WM_T_82575:
   9656 	case WM_T_82576:
   9657 	case WM_T_82580:
   9658 	case WM_T_I350:
   9659 	case WM_T_I354:
   9660 	case WM_T_I210:
   9661 	case WM_T_I211:
   9662 	case WM_T_80003:
   9663 		/* null */
   9664 		break;
   9665 	case WM_T_82541:
   9666 	case WM_T_82547:
    9667 		/* XXX Configure the activity LED after PHY reset */
   9668 		break;
   9669 	case WM_T_ICH8:
   9670 	case WM_T_ICH9:
   9671 	case WM_T_ICH10:
   9672 	case WM_T_PCH:
   9673 	case WM_T_PCH2:
   9674 	case WM_T_PCH_LPT:
   9675 	case WM_T_PCH_SPT:
   9676 	case WM_T_PCH_CNP:
   9677 		wm_phy_post_reset(sc);
   9678 		break;
   9679 	default:
   9680 		panic("%s: unknown type\n", __func__);
   9681 		break;
   9682 	}
   9683 }
   9684 
    9685 /*
    9686  * Set up sc_phytype and mii_{read|write}reg.
    9687  *
    9688  *  To identify the PHY type, the correct read/write functions must be
    9689  * selected, and they must be selected using only the PCI ID or the MAC
    9690  * type, without accessing any PHY registers.
    9691  *
    9692  *  On the first call of this function, the PHY ID is not known yet, so
    9693  * the PCI ID or the MAC type is checked. The list of PCI IDs may not be
    9694  * complete, so the result might be incorrect.
    9695  *
    9696  *  On the second call, the PHY OUI and model are used to identify the
    9697  * PHY type. This might still not be perfect because entries may be
    9698  * missing from the comparison list, but it is better than the first call.
    9699  *
    9700  *  If the newly detected result differs from the previous assumption,
    9701  * a diagnostic message is printed.
    9702  */
   9703 static void
   9704 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9705     uint16_t phy_model)
   9706 {
   9707 	device_t dev = sc->sc_dev;
   9708 	struct mii_data *mii = &sc->sc_mii;
   9709 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9710 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9711 	mii_readreg_t new_readreg;
   9712 	mii_writereg_t new_writereg;
   9713 
   9714 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9715 		device_xname(sc->sc_dev), __func__));
   9716 
   9717 	if (mii->mii_readreg == NULL) {
   9718 		/*
   9719 		 *  This is the first call of this function. For ICH and PCH
   9720 		 * variants, it's difficult to determine the PHY access method
   9721 		 * by sc_type, so use the PCI product ID for some devices.
   9722 		 */
   9723 
   9724 		switch (sc->sc_pcidevid) {
   9725 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9726 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9727 			/* 82577 */
   9728 			new_phytype = WMPHY_82577;
   9729 			break;
   9730 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9731 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9732 			/* 82578 */
   9733 			new_phytype = WMPHY_82578;
   9734 			break;
   9735 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9736 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9737 			/* 82579 */
   9738 			new_phytype = WMPHY_82579;
   9739 			break;
   9740 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9741 		case PCI_PRODUCT_INTEL_82801I_BM:
   9742 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9743 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9744 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9745 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9746 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9747 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9748 			/* ICH8, 9, 10 with 82567 */
   9749 			new_phytype = WMPHY_BM;
   9750 			break;
   9751 		default:
   9752 			break;
   9753 		}
   9754 	} else {
   9755 		/* It's not the first call. Use PHY OUI and model */
   9756 		switch (phy_oui) {
   9757 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9758 			switch (phy_model) {
   9759 			case 0x0004: /* XXX */
   9760 				new_phytype = WMPHY_82578;
   9761 				break;
   9762 			default:
   9763 				break;
   9764 			}
   9765 			break;
   9766 		case MII_OUI_xxMARVELL:
   9767 			switch (phy_model) {
   9768 			case MII_MODEL_xxMARVELL_I210:
   9769 				new_phytype = WMPHY_I210;
   9770 				break;
   9771 			case MII_MODEL_xxMARVELL_E1011:
   9772 			case MII_MODEL_xxMARVELL_E1000_3:
   9773 			case MII_MODEL_xxMARVELL_E1000_5:
   9774 			case MII_MODEL_xxMARVELL_E1112:
   9775 				new_phytype = WMPHY_M88;
   9776 				break;
   9777 			case MII_MODEL_xxMARVELL_E1149:
   9778 				new_phytype = WMPHY_BM;
   9779 				break;
   9780 			case MII_MODEL_xxMARVELL_E1111:
   9781 			case MII_MODEL_xxMARVELL_I347:
   9782 			case MII_MODEL_xxMARVELL_E1512:
   9783 			case MII_MODEL_xxMARVELL_E1340M:
   9784 			case MII_MODEL_xxMARVELL_E1543:
   9785 				new_phytype = WMPHY_M88;
   9786 				break;
   9787 			case MII_MODEL_xxMARVELL_I82563:
   9788 				new_phytype = WMPHY_GG82563;
   9789 				break;
   9790 			default:
   9791 				break;
   9792 			}
   9793 			break;
   9794 		case MII_OUI_INTEL:
   9795 			switch (phy_model) {
   9796 			case MII_MODEL_INTEL_I82577:
   9797 				new_phytype = WMPHY_82577;
   9798 				break;
   9799 			case MII_MODEL_INTEL_I82579:
   9800 				new_phytype = WMPHY_82579;
   9801 				break;
   9802 			case MII_MODEL_INTEL_I217:
   9803 				new_phytype = WMPHY_I217;
   9804 				break;
   9805 			case MII_MODEL_INTEL_I82580:
   9806 			case MII_MODEL_INTEL_I350:
   9807 				new_phytype = WMPHY_82580;
   9808 				break;
   9809 			default:
   9810 				break;
   9811 			}
   9812 			break;
   9813 		case MII_OUI_yyINTEL:
   9814 			switch (phy_model) {
   9815 			case MII_MODEL_yyINTEL_I82562G:
   9816 			case MII_MODEL_yyINTEL_I82562EM:
   9817 			case MII_MODEL_yyINTEL_I82562ET:
   9818 				new_phytype = WMPHY_IFE;
   9819 				break;
   9820 			case MII_MODEL_yyINTEL_IGP01E1000:
   9821 				new_phytype = WMPHY_IGP;
   9822 				break;
   9823 			case MII_MODEL_yyINTEL_I82566:
   9824 				new_phytype = WMPHY_IGP_3;
   9825 				break;
   9826 			default:
   9827 				break;
   9828 			}
   9829 			break;
   9830 		default:
   9831 			break;
   9832 		}
   9833 		if (new_phytype == WMPHY_UNKNOWN)
   9834 			aprint_verbose_dev(dev,
   9835 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9836 			    __func__, phy_oui, phy_model);
   9837 
   9838 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9839 		    && (sc->sc_phytype != new_phytype)) {
    9840 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9841 			    " was incorrect. PHY type from PHY ID = %u\n",
   9842 			    sc->sc_phytype, new_phytype);
   9843 		}
   9844 	}
   9845 
   9846 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9847 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9848 		/* SGMII */
   9849 		new_readreg = wm_sgmii_readreg;
   9850 		new_writereg = wm_sgmii_writereg;
    9851 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9852 		/* BM2 (phyaddr == 1) */
   9853 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9854 		    && (new_phytype != WMPHY_BM)
   9855 		    && (new_phytype != WMPHY_UNKNOWN))
   9856 			doubt_phytype = new_phytype;
   9857 		new_phytype = WMPHY_BM;
   9858 		new_readreg = wm_gmii_bm_readreg;
   9859 		new_writereg = wm_gmii_bm_writereg;
   9860 	} else if (sc->sc_type >= WM_T_PCH) {
   9861 		/* All PCH* use _hv_ */
   9862 		new_readreg = wm_gmii_hv_readreg;
   9863 		new_writereg = wm_gmii_hv_writereg;
   9864 	} else if (sc->sc_type >= WM_T_ICH8) {
   9865 		/* non-82567 ICH8, 9 and 10 */
   9866 		new_readreg = wm_gmii_i82544_readreg;
   9867 		new_writereg = wm_gmii_i82544_writereg;
   9868 	} else if (sc->sc_type >= WM_T_80003) {
   9869 		/* 80003 */
   9870 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9871 		    && (new_phytype != WMPHY_GG82563)
   9872 		    && (new_phytype != WMPHY_UNKNOWN))
   9873 			doubt_phytype = new_phytype;
   9874 		new_phytype = WMPHY_GG82563;
   9875 		new_readreg = wm_gmii_i80003_readreg;
   9876 		new_writereg = wm_gmii_i80003_writereg;
   9877 	} else if (sc->sc_type >= WM_T_I210) {
   9878 		/* I210 and I211 */
   9879 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9880 		    && (new_phytype != WMPHY_I210)
   9881 		    && (new_phytype != WMPHY_UNKNOWN))
   9882 			doubt_phytype = new_phytype;
   9883 		new_phytype = WMPHY_I210;
   9884 		new_readreg = wm_gmii_gs40g_readreg;
   9885 		new_writereg = wm_gmii_gs40g_writereg;
   9886 	} else if (sc->sc_type >= WM_T_82580) {
   9887 		/* 82580, I350 and I354 */
   9888 		new_readreg = wm_gmii_82580_readreg;
   9889 		new_writereg = wm_gmii_82580_writereg;
   9890 	} else if (sc->sc_type >= WM_T_82544) {
    9891 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9892 		new_readreg = wm_gmii_i82544_readreg;
   9893 		new_writereg = wm_gmii_i82544_writereg;
   9894 	} else {
   9895 		new_readreg = wm_gmii_i82543_readreg;
   9896 		new_writereg = wm_gmii_i82543_writereg;
   9897 	}
   9898 
   9899 	if (new_phytype == WMPHY_BM) {
   9900 		/* All BM use _bm_ */
   9901 		new_readreg = wm_gmii_bm_readreg;
   9902 		new_writereg = wm_gmii_bm_writereg;
   9903 	}
   9904 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9905 		/* All PCH* use _hv_ */
   9906 		new_readreg = wm_gmii_hv_readreg;
   9907 		new_writereg = wm_gmii_hv_writereg;
   9908 	}
   9909 
   9910 	/* Diag output */
   9911 	if (doubt_phytype != WMPHY_UNKNOWN)
   9912 		aprint_error_dev(dev, "Assumed new PHY type was "
   9913 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9914 		    new_phytype);
   9915 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9916 	    && (sc->sc_phytype != new_phytype))
    9917 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9918 		    " was incorrect. New PHY type = %u\n",
   9919 		    sc->sc_phytype, new_phytype);
   9920 
   9921 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9922 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9923 
   9924 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9925 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9926 		    "function was incorrect.\n");
   9927 
   9928 	/* Update now */
   9929 	sc->sc_phytype = new_phytype;
   9930 	mii->mii_readreg = new_readreg;
   9931 	mii->mii_writereg = new_writereg;
   9932 	if (new_readreg == wm_gmii_hv_readreg) {
   9933 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9934 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9935 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9936 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9937 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9938 	}
   9939 }
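
/*
 * Editor's note: a sketch of the two-phase calling convention described
 * in the comment above wm_gmii_setup_phytype().  On the first call
 * mii_readreg is still NULL, so the OUI/model arguments are ignored and
 * may be passed as 0; the identifiers below are from this file, but the
 * flow itself is illustrative.
 */
#if 0	/* illustrative sketch, not compiled */
	struct mii_softc *child;

	/* First call, from the attach path: guess from PCI ID/MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... mii_attach() probes the PHY ... */

	/* Second call: refine the guess from the probed PHY's OUI and
	 * model, as wm_gmii_mediainit() does below. */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
#endif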
   9940 
   9941 /*
   9942  * wm_get_phy_id_82575:
   9943  *
    9944  * Return the PHY ID, or -1 on failure.
   9945  */
   9946 static int
   9947 wm_get_phy_id_82575(struct wm_softc *sc)
   9948 {
   9949 	uint32_t reg;
   9950 	int phyid = -1;
   9951 
   9952 	/* XXX */
   9953 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9954 		return -1;
   9955 
   9956 	if (wm_sgmii_uses_mdio(sc)) {
   9957 		switch (sc->sc_type) {
   9958 		case WM_T_82575:
   9959 		case WM_T_82576:
   9960 			reg = CSR_READ(sc, WMREG_MDIC);
   9961 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9962 			break;
   9963 		case WM_T_82580:
   9964 		case WM_T_I350:
   9965 		case WM_T_I354:
   9966 		case WM_T_I210:
   9967 		case WM_T_I211:
   9968 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9969 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9970 			break;
   9971 		default:
   9972 			return -1;
   9973 		}
   9974 	}
   9975 
   9976 	return phyid;
   9977 }
   9978 
   9979 
   9980 /*
   9981  * wm_gmii_mediainit:
   9982  *
   9983  *	Initialize media for use on 1000BASE-T devices.
   9984  */
   9985 static void
   9986 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9987 {
   9988 	device_t dev = sc->sc_dev;
   9989 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9990 	struct mii_data *mii = &sc->sc_mii;
   9991 	uint32_t reg;
   9992 
   9993 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9994 		device_xname(sc->sc_dev), __func__));
   9995 
   9996 	/* We have GMII. */
   9997 	sc->sc_flags |= WM_F_HAS_MII;
   9998 
   9999 	if (sc->sc_type == WM_T_80003)
   10000 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10001 	else
   10002 		sc->sc_tipg = TIPG_1000T_DFLT;
   10003 
   10004 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10005 	if ((sc->sc_type == WM_T_82580)
   10006 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10007 	    || (sc->sc_type == WM_T_I211)) {
   10008 		reg = CSR_READ(sc, WMREG_PHPM);
   10009 		reg &= ~PHPM_GO_LINK_D;
   10010 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10011 	}
   10012 
   10013 	/*
   10014 	 * Let the chip set speed/duplex on its own based on
   10015 	 * signals from the PHY.
   10016 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10017 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10018 	 */
   10019 	sc->sc_ctrl |= CTRL_SLU;
   10020 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10021 
   10022 	/* Initialize our media structures and probe the GMII. */
   10023 	mii->mii_ifp = ifp;
   10024 
   10025 	mii->mii_statchg = wm_gmii_statchg;
   10026 
   10027 	/* get PHY control from SMBus to PCIe */
   10028 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10029 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10030 	    || (sc->sc_type == WM_T_PCH_CNP))
   10031 		wm_init_phy_workarounds_pchlan(sc);
   10032 
   10033 	wm_gmii_reset(sc);
   10034 
   10035 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10036 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10037 	    wm_gmii_mediastatus);
   10038 
   10039 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10040 	    || (sc->sc_type == WM_T_82580)
   10041 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10042 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10043 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10044 			/* Attach only one port */
   10045 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10046 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10047 		} else {
   10048 			int i, id;
   10049 			uint32_t ctrl_ext;
   10050 
   10051 			id = wm_get_phy_id_82575(sc);
   10052 			if (id != -1) {
   10053 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10054 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10055 			}
   10056 			if ((id == -1)
   10057 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10058 				/* Power on sgmii phy if it is disabled */
   10059 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10060 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10061 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10062 				CSR_WRITE_FLUSH(sc);
   10063 				delay(300*1000); /* XXX too long */
   10064 
    10065 				/* Try PHY addresses 1 through 7 */
    10066 				for (i = 1; i < 8; i++)
   10067 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10068 					    0xffffffff, i, MII_OFFSET_ANY,
   10069 					    MIIF_DOPAUSE);
   10070 
    10071 				/* Restore the previous SFP cage power state */
   10072 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10073 			}
   10074 		}
   10075 	} else
   10076 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10077 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10078 
   10079 	/*
    10080 	 * If the MAC is PCH2 or newer and the MII PHY was not detected, call
    10081 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10082 	 */
   10083 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10084 		|| (sc->sc_type == WM_T_PCH_SPT)
   10085 		|| (sc->sc_type == WM_T_PCH_CNP))
   10086 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10087 		wm_set_mdio_slow_mode_hv(sc);
   10088 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10089 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10090 	}
   10091 
   10092 	/*
   10093 	 * (For ICH8 variants)
   10094 	 * If PHY detection failed, use BM's r/w function and retry.
   10095 	 */
   10096 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10097 		/* if failed, retry with *_bm_* */
   10098 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10099 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10100 		    sc->sc_phytype);
   10101 		sc->sc_phytype = WMPHY_BM;
   10102 		mii->mii_readreg = wm_gmii_bm_readreg;
   10103 		mii->mii_writereg = wm_gmii_bm_writereg;
   10104 
   10105 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10106 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10107 	}
   10108 
   10109 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10110 		/* No PHY was found */
   10111 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10112 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10113 		sc->sc_phytype = WMPHY_NONE;
   10114 	} else {
   10115 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10116 
   10117 		/*
    10118 		 * A PHY was found. Check the PHY type again with a second
    10119 		 * call of wm_gmii_setup_phytype().
   10120 		 */
   10121 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10122 		    child->mii_mpd_model);
   10123 
   10124 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10125 	}
   10126 }
   10127 
   10128 /*
   10129  * wm_gmii_mediachange:	[ifmedia interface function]
   10130  *
   10131  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10132  */
   10133 static int
   10134 wm_gmii_mediachange(struct ifnet *ifp)
   10135 {
   10136 	struct wm_softc *sc = ifp->if_softc;
   10137 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10138 	int rc;
   10139 
   10140 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10141 		device_xname(sc->sc_dev), __func__));
   10142 	if ((ifp->if_flags & IFF_UP) == 0)
   10143 		return 0;
   10144 
   10145 	/* Disable D0 LPLU. */
   10146 	wm_lplu_d0_disable(sc);
   10147 
   10148 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10149 	sc->sc_ctrl |= CTRL_SLU;
   10150 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10151 	    || (sc->sc_type > WM_T_82543)) {
   10152 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10153 	} else {
   10154 		sc->sc_ctrl &= ~CTRL_ASDE;
   10155 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10156 		if (ife->ifm_media & IFM_FDX)
   10157 			sc->sc_ctrl |= CTRL_FD;
   10158 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10159 		case IFM_10_T:
   10160 			sc->sc_ctrl |= CTRL_SPEED_10;
   10161 			break;
   10162 		case IFM_100_TX:
   10163 			sc->sc_ctrl |= CTRL_SPEED_100;
   10164 			break;
   10165 		case IFM_1000_T:
   10166 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10167 			break;
   10168 		case IFM_NONE:
   10169 			/* There is no specific setting for IFM_NONE */
   10170 			break;
   10171 		default:
   10172 			panic("wm_gmii_mediachange: bad media 0x%x",
   10173 			    ife->ifm_media);
   10174 		}
   10175 	}
   10176 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10177 	CSR_WRITE_FLUSH(sc);
   10178 	if (sc->sc_type <= WM_T_82543)
   10179 		wm_gmii_reset(sc);
   10180 
   10181 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10182 		return 0;
   10183 	return rc;
   10184 }
   10185 
   10186 /*
   10187  * wm_gmii_mediastatus:	[ifmedia interface function]
   10188  *
   10189  *	Get the current interface media status on a 1000BASE-T device.
   10190  */
   10191 static void
   10192 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10193 {
   10194 	struct wm_softc *sc = ifp->if_softc;
   10195 
   10196 	ether_mediastatus(ifp, ifmr);
   10197 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10198 	    | sc->sc_flowflags;
   10199 }
   10200 
   10201 #define	MDI_IO		CTRL_SWDPIN(2)
   10202 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10203 #define	MDI_CLK		CTRL_SWDPIN(3)
   10204 
   10205 static void
   10206 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10207 {
   10208 	uint32_t i, v;
   10209 
   10210 	v = CSR_READ(sc, WMREG_CTRL);
   10211 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10212 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10213 
   10214 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10215 		if (data & i)
   10216 			v |= MDI_IO;
   10217 		else
   10218 			v &= ~MDI_IO;
   10219 		CSR_WRITE(sc, WMREG_CTRL, v);
   10220 		CSR_WRITE_FLUSH(sc);
   10221 		delay(10);
   10222 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10223 		CSR_WRITE_FLUSH(sc);
   10224 		delay(10);
   10225 		CSR_WRITE(sc, WMREG_CTRL, v);
   10226 		CSR_WRITE_FLUSH(sc);
   10227 		delay(10);
   10228 	}
   10229 }
   10230 
   10231 static uint32_t
   10232 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10233 {
   10234 	uint32_t v, i, data = 0;
   10235 
   10236 	v = CSR_READ(sc, WMREG_CTRL);
   10237 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10238 	v |= CTRL_SWDPIO(3);
   10239 
   10240 	CSR_WRITE(sc, WMREG_CTRL, v);
   10241 	CSR_WRITE_FLUSH(sc);
   10242 	delay(10);
   10243 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10244 	CSR_WRITE_FLUSH(sc);
   10245 	delay(10);
   10246 	CSR_WRITE(sc, WMREG_CTRL, v);
   10247 	CSR_WRITE_FLUSH(sc);
   10248 	delay(10);
   10249 
   10250 	for (i = 0; i < 16; i++) {
   10251 		data <<= 1;
   10252 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10253 		CSR_WRITE_FLUSH(sc);
   10254 		delay(10);
   10255 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10256 			data |= 1;
   10257 		CSR_WRITE(sc, WMREG_CTRL, v);
   10258 		CSR_WRITE_FLUSH(sc);
   10259 		delay(10);
   10260 	}
   10261 
   10262 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10263 	CSR_WRITE_FLUSH(sc);
   10264 	delay(10);
   10265 	CSR_WRITE(sc, WMREG_CTRL, v);
   10266 	CSR_WRITE_FLUSH(sc);
   10267 	delay(10);
   10268 
   10269 	return data;
   10270 }
   10271 
   10272 #undef MDI_IO
   10273 #undef MDI_DIR
   10274 #undef MDI_CLK
   10275 
   10276 /*
   10277  * wm_gmii_i82543_readreg:	[mii interface function]
   10278  *
   10279  *	Read a PHY register on the GMII (i82543 version).
   10280  */
   10281 static int
   10282 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10283 {
   10284 	struct wm_softc *sc = device_private(dev);
   10285 	int rv;
   10286 
   10287 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10288 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10289 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10290 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10291 
   10292 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10293 		device_xname(dev), phy, reg, rv));
   10294 
   10295 	return rv;
   10296 }
   10297 
   10298 /*
   10299  * wm_gmii_i82543_writereg:	[mii interface function]
   10300  *
   10301  *	Write a PHY register on the GMII (i82543 version).
   10302  */
   10303 static void
   10304 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10305 {
   10306 	struct wm_softc *sc = device_private(dev);
   10307 
   10308 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10309 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10310 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10311 	    (MII_COMMAND_START << 30), 32);
   10312 }
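
/*
 * Editor's note: the shift constants in the two functions above encode
 * IEEE 802.3 clause 22 management frames, bit-banged MSB first after a
 * 32-bit preamble of ones.  A sketch of the layouts (field widths are
 * standard clause 22; the macro names are this file's):
 *
 *   read,  14 bits sent:  START(2) | READ op(2) | phy(5) | reg(5),
 *                         then the PHY drives 16 data bits back;
 *   write, 32 bits sent:  START(2) | WRITE op(2) | phy(5) | reg(5) |
 *                         turnaround(2) | data(16).
 *
 * This matches reg | (phy << 5) | (op << 10) | (start << 12) for the
 * read header and the << 16/18/23/28/30 packing of the write frame.
 */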
   10313 
   10314 /*
   10315  * wm_gmii_mdic_readreg:	[mii interface function]
   10316  *
   10317  *	Read a PHY register on the GMII.
   10318  */
   10319 static int
   10320 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10321 {
   10322 	struct wm_softc *sc = device_private(dev);
   10323 	uint32_t mdic = 0;
   10324 	int i, rv;
   10325 
   10326 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10327 	    && (reg > MII_ADDRMASK)) {
   10328 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10329 		    __func__, sc->sc_phytype, reg);
   10330 		reg &= MII_ADDRMASK;
   10331 	}
   10332 
   10333 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10334 	    MDIC_REGADD(reg));
   10335 
   10336 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10337 		delay(50);
   10338 		mdic = CSR_READ(sc, WMREG_MDIC);
   10339 		if (mdic & MDIC_READY)
   10340 			break;
   10341 	}
   10342 
   10343 	if ((mdic & MDIC_READY) == 0) {
   10344 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10345 		    device_xname(dev), phy, reg);
   10346 		return 0;
   10347 	} else if (mdic & MDIC_E) {
   10348 #if 0 /* This is normal if no PHY is present. */
   10349 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10350 		    device_xname(dev), phy, reg);
   10351 #endif
   10352 		return 0;
   10353 	} else {
   10354 		rv = MDIC_DATA(mdic);
   10355 		if (rv == 0xffff)
   10356 			rv = 0;
   10357 	}
   10358 
   10359 	/*
   10360 	 * Allow some time after each MDIC transaction to avoid
   10361 	 * reading duplicate data in the next MDIC transaction.
   10362 	 */
   10363 	if (sc->sc_type == WM_T_PCH2)
   10364 		delay(100);
   10365 
   10366 	return rv;
   10367 }
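
/*
 * Editor's note on the poll loop above: each iteration delays 50us, so
 * the ready-bit budget is WM_GEN_POLL_TIMEOUT * 3 * 50us in total.  For
 * example, if WM_GEN_POLL_TIMEOUT were 800 (an assumed value; see its
 * definition elsewhere in this driver), the loop would wait up to
 * 800 * 3 * 50us = 120ms before logging a timeout.
 */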
   10368 
   10369 /*
   10370  * wm_gmii_mdic_writereg:	[mii interface function]
   10371  *
   10372  *	Write a PHY register on the GMII.
   10373  */
   10374 static void
   10375 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10376 {
   10377 	struct wm_softc *sc = device_private(dev);
   10378 	uint32_t mdic = 0;
   10379 	int i;
   10380 
   10381 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10382 	    && (reg > MII_ADDRMASK)) {
   10383 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10384 		    __func__, sc->sc_phytype, reg);
   10385 		reg &= MII_ADDRMASK;
   10386 	}
   10387 
   10388 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10389 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10390 
   10391 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10392 		delay(50);
   10393 		mdic = CSR_READ(sc, WMREG_MDIC);
   10394 		if (mdic & MDIC_READY)
   10395 			break;
   10396 	}
   10397 
   10398 	if ((mdic & MDIC_READY) == 0) {
   10399 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10400 		    device_xname(dev), phy, reg);
   10401 		return;
   10402 	} else if (mdic & MDIC_E) {
   10403 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10404 		    device_xname(dev), phy, reg);
   10405 		return;
   10406 	}
   10407 
   10408 	/*
   10409 	 * Allow some time after each MDIC transaction to avoid
   10410 	 * reading duplicate data in the next MDIC transaction.
   10411 	 */
   10412 	if (sc->sc_type == WM_T_PCH2)
   10413 		delay(100);
   10414 }
   10415 
   10416 /*
   10417  * wm_gmii_i82544_readreg:	[mii interface function]
   10418  *
   10419  *	Read a PHY register on the GMII.
   10420  */
   10421 static int
   10422 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10423 {
   10424 	struct wm_softc *sc = device_private(dev);
   10425 	uint16_t val;
   10426 
   10427 	if (sc->phy.acquire(sc)) {
   10428 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10429 		return 0;
   10430 	}
   10431 
   10432 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10433 
   10434 	sc->phy.release(sc);
   10435 
   10436 	return val;
   10437 }
   10438 
   10439 static int
   10440 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10441 {
   10442 	struct wm_softc *sc = device_private(dev);
   10443 
   10444 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10445 		switch (sc->sc_phytype) {
   10446 		case WMPHY_IGP:
   10447 		case WMPHY_IGP_2:
   10448 		case WMPHY_IGP_3:
   10449 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10450 			    reg);
   10451 			break;
   10452 		default:
   10453 #ifdef WM_DEBUG
   10454 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10455 			    __func__, sc->sc_phytype, reg);
   10456 #endif
   10457 			break;
   10458 		}
   10459 	}
   10460 
   10461 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10462 
   10463 	return 0;
   10464 }
   10465 
   10466 /*
   10467  * wm_gmii_i82544_writereg:	[mii interface function]
   10468  *
   10469  *	Write a PHY register on the GMII.
   10470  */
   10471 static void
   10472 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10473 {
   10474 	struct wm_softc *sc = device_private(dev);
   10475 
   10476 	if (sc->phy.acquire(sc)) {
   10477 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10478 		return;
   10479 	}
   10480 
    10481 	wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10482 	sc->phy.release(sc);
   10483 }
   10484 
   10485 static int
   10486 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10487 {
   10488 	struct wm_softc *sc = device_private(dev);
   10489 
   10490 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10491 		switch (sc->sc_phytype) {
   10492 		case WMPHY_IGP:
   10493 		case WMPHY_IGP_2:
   10494 		case WMPHY_IGP_3:
   10495 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10496 			    reg);
   10497 			break;
   10498 		default:
   10499 #ifdef WM_DEBUG
    10500 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10501 			    __func__, sc->sc_phytype, reg);
   10502 #endif
   10503 			break;
   10504 		}
   10505 	}
   10506 
   10507 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10508 
   10509 	return 0;
   10510 }
   10511 
   10512 /*
   10513  * wm_gmii_i80003_readreg:	[mii interface function]
   10514  *
    10515  *	Read a PHY register on the kumeran bus (80003).
    10516  * This could be handled by the PHY layer if we didn't have to lock the
    10517  * resource ...
   10518  */
   10519 static int
   10520 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10521 {
   10522 	struct wm_softc *sc = device_private(dev);
   10523 	int page_select, temp;
   10524 	int rv;
   10525 
   10526 	if (phy != 1) /* only one PHY on kumeran bus */
   10527 		return 0;
   10528 
   10529 	if (sc->phy.acquire(sc)) {
   10530 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10531 		return 0;
   10532 	}
   10533 
   10534 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10535 		page_select = GG82563_PHY_PAGE_SELECT;
   10536 	else {
   10537 		/*
   10538 		 * Use Alternative Page Select register to access registers
   10539 		 * 30 and 31.
   10540 		 */
   10541 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10542 	}
   10543 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10544 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10545 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10546 		/*
    10547 		 * Wait an extra 200us to work around a bug in the MDIC
    10548 		 * register's ready bit.
   10549 		 */
   10550 		delay(200);
   10551 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10552 			device_printf(dev, "%s failed\n", __func__);
   10553 			rv = 0; /* XXX */
   10554 			goto out;
   10555 		}
   10556 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10557 		delay(200);
   10558 	} else
   10559 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10560 
   10561 out:
   10562 	sc->phy.release(sc);
   10563 	return rv;
   10564 }
   10565 
   10566 /*
   10567  * wm_gmii_i80003_writereg:	[mii interface function]
   10568  *
   10569  *	Write a PHY register on the kumeran.
   10570  * This could be handled by the PHY layer if we didn't have to lock the
    10571  * resource ...
   10572  */
   10573 static void
   10574 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10575 {
   10576 	struct wm_softc *sc = device_private(dev);
   10577 	int page_select, temp;
   10578 
   10579 	if (phy != 1) /* only one PHY on kumeran bus */
   10580 		return;
   10581 
   10582 	if (sc->phy.acquire(sc)) {
   10583 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10584 		return;
   10585 	}
   10586 
   10587 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10588 		page_select = GG82563_PHY_PAGE_SELECT;
   10589 	else {
   10590 		/*
   10591 		 * Use Alternative Page Select register to access registers
   10592 		 * 30 and 31.
   10593 		 */
   10594 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10595 	}
   10596 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10597 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10598 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10599 		/*
    10600 		 * Wait an extra 200us to work around a bug in the MDIC
    10601 		 * register's ready bit.
   10602 		 */
   10603 		delay(200);
   10604 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10605 			device_printf(dev, "%s failed\n", __func__);
   10606 			goto out;
   10607 		}
   10608 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10609 		delay(200);
   10610 	} else
   10611 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10612 
   10613 out:
   10614 	sc->phy.release(sc);
   10615 }
   10616 
   10617 /*
   10618  * wm_gmii_bm_readreg:	[mii interface function]
   10619  *
    10620  *	Read a PHY register on the BM PHY.
    10621  * This could be handled by the PHY layer if we didn't have to lock the
    10622  * resource ...
   10623  */
   10624 static int
   10625 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10626 {
   10627 	struct wm_softc *sc = device_private(dev);
   10628 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10629 	uint16_t val;
   10630 	int rv;
   10631 
   10632 	if (sc->phy.acquire(sc)) {
   10633 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10634 		return 0;
   10635 	}
   10636 
   10637 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10638 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10639 		    || (reg == 31)) ? 1 : phy;
   10640 	/* Page 800 works differently than the rest so it has its own func */
   10641 	if (page == BM_WUC_PAGE) {
   10642 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, true, false);
   10643 		rv = val;
   10644 		goto release;
   10645 	}
   10646 
   10647 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10648 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10649 		    && (sc->sc_type != WM_T_82583))
   10650 			wm_gmii_mdic_writereg(dev, phy,
   10651 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10652 		else
   10653 			wm_gmii_mdic_writereg(dev, phy,
   10654 			    BME1000_PHY_PAGE_SELECT, page);
   10655 	}
   10656 
   10657 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10658 
   10659 release:
   10660 	sc->phy.release(sc);
   10661 	return rv;
   10662 }
   10663 
   10664 /*
   10665  * wm_gmii_bm_writereg:	[mii interface function]
   10666  *
    10667  *	Write a PHY register on the BM PHY.
    10668  * This could be handled by the PHY layer if we didn't have to lock the
    10669  * resource ...
   10670  */
   10671 static void
   10672 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10673 {
   10674 	struct wm_softc *sc = device_private(dev);
   10675 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10676 
   10677 	if (sc->phy.acquire(sc)) {
   10678 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10679 		return;
   10680 	}
   10681 
   10682 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10683 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10684 		    || (reg == 31)) ? 1 : phy;
   10685 	/* Page 800 works differently than the rest so it has its own func */
   10686 	if (page == BM_WUC_PAGE) {
   10687 		uint16_t tmp;
   10688 
   10689 		tmp = val;
   10690 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false);
   10691 		goto release;
   10692 	}
   10693 
   10694 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10695 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10696 		    && (sc->sc_type != WM_T_82583))
   10697 			wm_gmii_mdic_writereg(dev, phy,
   10698 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10699 		else
   10700 			wm_gmii_mdic_writereg(dev, phy,
   10701 			    BME1000_PHY_PAGE_SELECT, page);
   10702 	}
   10703 
   10704 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10705 
   10706 release:
   10707 	sc->phy.release(sc);
   10708 }
   10709 
   10710 /*
   10711  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10712  *  @dev: pointer to the HW structure
   10713  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10714  *
   10715  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10716  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10717  */
   10718 static int
   10719 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10720 {
   10721 	uint16_t temp;
   10722 
   10723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10724 		device_xname(dev), __func__));
   10725 
   10726 	if (!phy_regp)
   10727 		return -1;
   10728 
   10729 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10730 
   10731 	/* Select Port Control Registers page */
   10732 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10733 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10734 
   10735 	/* Read WUCE and save it */
   10736 	*phy_regp = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10737 
   10738 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10739 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10740 	 */
   10741 	temp = *phy_regp;
   10742 	temp |= BM_WUC_ENABLE_BIT;
   10743 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10744 
   10745 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp);
   10746 
   10747 	/* Select Host Wakeup Registers page - caller now able to write
   10748 	 * registers on the Wakeup registers page
   10749 	 */
   10750 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10751 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10752 
   10753 	return 0;
   10754 }
   10755 
   10756 /*
   10757  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10758  *  @dev: pointer to the HW structure
   10759  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10760  *
   10761  *  Restore BM_WUC_ENABLE_REG to its original value.
   10762  *
   10763  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10764  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10765  *  caller.
   10766  */
   10767 static int
   10768 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10769 {
   10770 
   10771 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10772 		device_xname(dev), __func__));
   10773 
   10774 	if (!phy_regp)
   10775 		return -1;
   10776 
   10777 	/* Select Port Control Registers page */
   10778 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10779 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10780 
   10781 	/* Restore 769.17 to its original value */
   10782 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10783 
   10784 	return 0;
   10785 }
   10786 
   10787 /*
   10788  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10789  *  @sc: pointer to the HW structure
   10790  *  @offset: register offset to be read or written
   10791  *  @val: pointer to the data to read or write
   10792  *  @rd: determines if operation is read or write
   10793  *  @page_set: BM_WUC_PAGE already set and access enabled
   10794  *
   10795  *  Read the PHY register at offset and store the retrieved information in
   10796  *  data, or write data to PHY register at offset.  Note the procedure to
   10797  *  access the PHY wakeup registers is different than reading the other PHY
   10798  *  registers. It works as such:
   10799  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   10800  *  2) Set page to 800 for host (801 if we were manageability)
   10801  *  3) Write the address using the address opcode (0x11)
   10802  *  4) Read or write the data using the data opcode (0x12)
   10803  *  5) Restore 769.17.2 to its original value
   10804  *
   10805  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   10806  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   10807  *
   10808  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   10809  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    10810  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   10811  */
   10812 static int
    10813 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   10814 	bool page_set)
   10815 {
   10816 	struct wm_softc *sc = device_private(dev);
   10817 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10818 	uint16_t page = BM_PHY_REG_PAGE(offset);
   10819 	uint16_t wuce;
   10820 	int rv = 0;
   10821 
   10822 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10823 		device_xname(dev), __func__));
   10824 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10825 	if ((sc->sc_type == WM_T_PCH)
   10826 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   10827 		device_printf(dev,
   10828 		    "Attempting to access page %d while gig enabled.\n", page);
   10829 	}
   10830 
   10831 	if (!page_set) {
   10832 		/* Enable access to PHY wakeup registers */
   10833 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   10834 		if (rv != 0) {
   10835 			device_printf(dev,
   10836 			    "%s: Could not enable PHY wakeup reg access\n",
   10837 			    __func__);
   10838 			return rv;
   10839 		}
   10840 	}
   10841 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   10842 		device_xname(sc->sc_dev), __func__, page, regnum));
   10843 
   10844 	/*
    10845 	 * 3) and 4) Access the PHY wakeup register.
    10846 	 * See the numbered procedure in the comment above this function.
   10847 	 */
   10848 
   10849 	/* Write the Wakeup register page offset value using opcode 0x11 */
   10850 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10851 
   10852 	if (rd) {
   10853 		/* Read the Wakeup register page value using opcode 0x12 */
   10854 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10855 	} else {
   10856 		/* Write the Wakeup register page value using opcode 0x12 */
   10857 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10858 	}
   10859 
   10860 	if (!page_set)
   10861 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   10862 
   10863 	return rv;
   10864 }
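
/*
 * Editor's note: a minimal usage sketch of the wakeup-register helpers
 * above, under the assumptions their comments state (semaphore held,
 * PHY address 1).  "offset" and "offset2" are hypothetical register
 * offsets whose BM_PHY_REG_PAGE() is BM_WUC_PAGE.
 */
#if 0	/* illustrative sketch, not compiled */
	uint16_t wuce, data;

	/* One-shot access: page_set == false, so steps 1-2 and 5 of the
	 * procedure above are performed internally. */
	wm_access_phy_wakeup_reg_bm(dev, offset, &data, true, false);

	/* Batched accesses: enable once, access with page_set == true,
	 * then restore 769.17 when done. */
	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
	wm_access_phy_wakeup_reg_bm(dev, offset, &data, true, true);
	wm_access_phy_wakeup_reg_bm(dev, offset2, &data, false, true);
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
#endif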
   10865 
   10866 /*
   10867  * wm_gmii_hv_readreg:	[mii interface function]
   10868  *
    10869  *	Read a PHY register on the HV (PCH and newer) PHYs.
    10870  * This could be handled by the PHY layer if we didn't have to lock the
    10871  * resource ...
   10872  */
   10873 static int
   10874 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10875 {
   10876 	struct wm_softc *sc = device_private(dev);
   10877 	uint16_t val;
   10878 
   10879 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10880 		device_xname(dev), __func__));
   10881 	if (sc->phy.acquire(sc)) {
   10882 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10883 		return 0;
   10884 	}
   10885 
   10886 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10887 	sc->phy.release(sc);
   10888 	return val;
   10889 }
   10890 
   10891 static int
   10892 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10893 {
   10894 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10895 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10896 
   10897 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10898 
   10899 	/* Page 800 works differently than the rest so it has its own func */
   10900 	if (page == BM_WUC_PAGE)
   10901 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10902 
   10903 	/*
    10904 	 * Pages lower than 768 work differently than the rest, so they
    10905 	 * would need their own function; they are not handled here.
   10906 	 */
   10907 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10908 		printf("gmii_hv_readreg!!!\n");
   10909 		return 0;
   10910 	}
   10911 
   10912 	/*
   10913 	 * XXX I21[789] documents say that the SMBus Address register is at
   10914 	 * PHY address 01, Page 0 (not 768), Register 26.
   10915 	 */
   10916 	if (page == HV_INTC_FC_PAGE_START)
   10917 		page = 0;
   10918 
   10919 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10920 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10921 		    page << BME1000_PAGE_SHIFT);
   10922 	}
   10923 
   10924 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10925 	return 0;
   10926 }
   10927 
   10928 /*
   10929  * wm_gmii_hv_writereg:	[mii interface function]
   10930  *
    10931  *	Write a PHY register on the HV (PCH and newer) PHYs.
    10932  * This could be handled by the PHY layer if we didn't have to lock the
    10933  * resource ...
   10934  */
   10935 static void
   10936 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10937 {
   10938 	struct wm_softc *sc = device_private(dev);
   10939 
   10940 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10941 		device_xname(dev), __func__));
   10942 
   10943 	if (sc->phy.acquire(sc)) {
   10944 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10945 		return;
   10946 	}
   10947 
   10948 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10949 	sc->phy.release(sc);
   10950 }
   10951 
   10952 static int
   10953 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10954 {
   10955 	struct wm_softc *sc = device_private(dev);
   10956 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10957 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10958 	int rv;
   10959 
   10960 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10961 
   10962 	/* Page 800 works differently than the rest so it has its own func */
   10963 	if (page == BM_WUC_PAGE) {
   10964 		uint16_t tmp;
   10965 
   10966 		tmp = val;
   10967 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false);
   10968 		return rv;
   10969 	}
   10970 
   10971 	/*
    10972 	 * Pages lower than 768 work differently than the rest, so they
    10973 	 * would need their own function; they are not handled here.
   10974 	 */
   10975 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10976 		printf("gmii_hv_writereg!!!\n");
   10977 		return -1;
   10978 	}
   10979 
   10980 	{
   10981 		/*
   10982 		 * XXX I21[789] documents say that the SMBus Address register
   10983 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10984 		 */
   10985 		if (page == HV_INTC_FC_PAGE_START)
   10986 			page = 0;
   10987 
   10988 		/*
   10989 		 * XXX Workaround MDIO accesses being disabled after entering
   10990 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10991 		 * register is set)
   10992 		 */
   10993 		if (sc->sc_phytype == WMPHY_82578) {
   10994 			struct mii_softc *child;
   10995 
   10996 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10997 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10998 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10999 			    && ((val & (1 << 11)) != 0)) {
   11000 				printf("XXX need workaround\n");
   11001 			}
   11002 		}
   11003 
   11004 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11005 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11006 			    page << BME1000_PAGE_SHIFT);
   11007 		}
   11008 	}
   11009 
   11010 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11011 
   11012 	return 0;
   11013 }
   11014 
   11015 /*
   11016  * wm_gmii_82580_readreg:	[mii interface function]
   11017  *
   11018  *	Read a PHY register on the 82580 and I350.
   11019  * This could be handled by the PHY layer if we didn't have to lock the
    11020  * resource ...
   11021  */
   11022 static int
   11023 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	int rv;
   11027 
   11028 	if (sc->phy.acquire(sc) != 0) {
   11029 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11030 		return 0;
   11031 	}
   11032 
   11033 #ifdef DIAGNOSTIC
   11034 	if (reg > MII_ADDRMASK) {
   11035 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11036 		    __func__, sc->sc_phytype, reg);
   11037 		reg &= MII_ADDRMASK;
   11038 	}
   11039 #endif
   11040 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   11041 
   11042 	sc->phy.release(sc);
   11043 	return rv;
   11044 }
   11045 
   11046 /*
   11047  * wm_gmii_82580_writereg:	[mii interface function]
   11048  *
   11049  *	Write a PHY register on the 82580 and I350.
   11050  * This could be handled by the PHY layer if we didn't have to lock the
    11051  * resource ...
   11052  */
   11053 static void
   11054 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   11055 {
   11056 	struct wm_softc *sc = device_private(dev);
   11057 
   11058 	if (sc->phy.acquire(sc) != 0) {
   11059 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11060 		return;
   11061 	}
   11062 
   11063 #ifdef DIAGNOSTIC
   11064 	if (reg > MII_ADDRMASK) {
   11065 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11066 		    __func__, sc->sc_phytype, reg);
   11067 		reg &= MII_ADDRMASK;
   11068 	}
   11069 #endif
   11070 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   11071 
   11072 	sc->phy.release(sc);
   11073 }
   11074 
   11075 /*
   11076  * wm_gmii_gs40g_readreg:	[mii interface function]
   11077  *
    11078  *	Read a PHY register on the I210 and I211.
    11079  * This could be handled by the PHY layer if we didn't have to lock the
    11080  * resource ...
   11081  */
   11082 static int
   11083 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   11084 {
   11085 	struct wm_softc *sc = device_private(dev);
   11086 	int page, offset;
   11087 	int rv;
   11088 
   11089 	/* Acquire semaphore */
   11090 	if (sc->phy.acquire(sc)) {
   11091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11092 		return 0;
   11093 	}
   11094 
   11095 	/* Page select */
   11096 	page = reg >> GS40G_PAGE_SHIFT;
   11097 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11098 
   11099 	/* Read reg */
   11100 	offset = reg & GS40G_OFFSET_MASK;
   11101 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   11102 
   11103 	sc->phy.release(sc);
   11104 	return rv;
   11105 }
   11106 
   11107 /*
   11108  * wm_gmii_gs40g_writereg:	[mii interface function]
   11109  *
   11110  *	Write a PHY register on the I210 and I211.
   11111  * This could be handled by the PHY layer if we didn't have to lock the
    11112  * resource ...
   11113  */
   11114 static void
   11115 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   11116 {
   11117 	struct wm_softc *sc = device_private(dev);
   11118 	int page, offset;
   11119 
   11120 	/* Acquire semaphore */
   11121 	if (sc->phy.acquire(sc)) {
   11122 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11123 		return;
   11124 	}
   11125 
   11126 	/* Page select */
   11127 	page = reg >> GS40G_PAGE_SHIFT;
   11128 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11129 
   11130 	/* Write reg */
   11131 	offset = reg & GS40G_OFFSET_MASK;
   11132 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   11133 
   11134 	/* Release semaphore */
   11135 	sc->phy.release(sc);
   11136 }
   11137 
   11138 /*
   11139  * wm_gmii_statchg:	[mii interface function]
   11140  *
   11141  *	Callback from MII layer when media changes.
   11142  */
   11143 static void
   11144 wm_gmii_statchg(struct ifnet *ifp)
   11145 {
   11146 	struct wm_softc *sc = ifp->if_softc;
   11147 	struct mii_data *mii = &sc->sc_mii;
   11148 
   11149 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11150 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11151 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11152 
   11153 	/*
   11154 	 * Get flow control negotiation result.
   11155 	 */
   11156 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11157 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11158 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11159 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11160 	}
   11161 
   11162 	if (sc->sc_flowflags & IFM_FLOW) {
   11163 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11164 			sc->sc_ctrl |= CTRL_TFCE;
   11165 			sc->sc_fcrtl |= FCRTL_XONE;
   11166 		}
   11167 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11168 			sc->sc_ctrl |= CTRL_RFCE;
   11169 	}
   11170 
   11171 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11172 		DPRINTF(WM_DEBUG_LINK,
   11173 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11174 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11175 	} else {
   11176 		DPRINTF(WM_DEBUG_LINK,
   11177 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11178 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11179 	}
   11180 
   11181 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11182 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11183 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11184 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11185 	if (sc->sc_type == WM_T_80003) {
   11186 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11187 		case IFM_1000_T:
   11188 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11189 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11191 			break;
   11192 		default:
   11193 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11194 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11196 			break;
   11197 		}
   11198 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11199 	}
   11200 }
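
/*
 * A note on the flow control plumbing above: CTRL_TFCE and CTRL_RFCE
 * enable transmission and honoring of PAUSE frames respectively, and
 * FCRTL_XONE enables XON frame generation, so the negotiated
 * IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE flags map directly onto those bits.
 */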
   11201 
   11202 /* kumeran related (80003, ICH* and PCH*) */
   11203 
   11204 /*
   11205  * wm_kmrn_readreg:
   11206  *
   11207  *	Read a kumeran register
   11208  */
   11209 static int
   11210 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11211 {
   11212 	int rv;
   11213 
   11214 	if (sc->sc_type == WM_T_80003)
   11215 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11216 	else
   11217 		rv = sc->phy.acquire(sc);
   11218 	if (rv != 0) {
   11219 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11220 		    __func__);
   11221 		return rv;
   11222 	}
   11223 
   11224 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11225 
   11226 	if (sc->sc_type == WM_T_80003)
   11227 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11228 	else
   11229 		sc->phy.release(sc);
   11230 
   11231 	return rv;
   11232 }
   11233 
   11234 static int
   11235 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11236 {
   11237 
   11238 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11239 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11240 	    KUMCTRLSTA_REN);
   11241 	CSR_WRITE_FLUSH(sc);
   11242 	delay(2);
   11243 
   11244 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11245 
   11246 	return 0;
   11247 }
   11248 
   11249 /*
   11250  * wm_kmrn_writereg:
   11251  *
   11252  *	Write a kumeran register
   11253  */
   11254 static int
   11255 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11256 {
   11257 	int rv;
   11258 
   11259 	if (sc->sc_type == WM_T_80003)
   11260 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11261 	else
   11262 		rv = sc->phy.acquire(sc);
   11263 	if (rv != 0) {
   11264 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11265 		    __func__);
   11266 		return rv;
   11267 	}
   11268 
   11269 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11270 
   11271 	if (sc->sc_type == WM_T_80003)
   11272 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11273 	else
   11274 		sc->phy.release(sc);
   11275 
   11276 	return rv;
   11277 }
   11278 
   11279 static int
   11280 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11281 {
   11282 
   11283 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11284 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11285 
   11286 	return 0;
   11287 }
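
/*
 * Usage sketch: every Kumeran access goes through the single KUMCTRLSTA
 * register.  A read writes the register offset with KUMCTRLSTA_REN set,
 * waits ~2us and reads the data back masked with KUMCTRLSTA_MASK; a
 * write simply ORs the data into the offset write.  The unlocked
 * wrappers above hold the MAC_CSR SWFW semaphore (80003) or the PHY
 * lock (ICH/PCH) across that sequence.
 */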
   11288 
   11289 /* SGMII related */
   11290 
   11291 /*
   11292  * wm_sgmii_uses_mdio
   11293  *
   11294  * Check whether the transaction is to the internal PHY or the external
   11295  * MDIO interface. Return true if it's MDIO.
   11296  */
   11297 static bool
   11298 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11299 {
   11300 	uint32_t reg;
   11301 	bool ismdio = false;
   11302 
   11303 	switch (sc->sc_type) {
   11304 	case WM_T_82575:
   11305 	case WM_T_82576:
   11306 		reg = CSR_READ(sc, WMREG_MDIC);
   11307 		ismdio = ((reg & MDIC_DEST) != 0);
   11308 		break;
   11309 	case WM_T_82580:
   11310 	case WM_T_I350:
   11311 	case WM_T_I354:
   11312 	case WM_T_I210:
   11313 	case WM_T_I211:
   11314 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11315 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11316 		break;
   11317 	default:
   11318 		break;
   11319 	}
   11320 
   11321 	return ismdio;
   11322 }
   11323 
   11324 /*
   11325  * wm_sgmii_readreg:	[mii interface function]
   11326  *
   11327  *	Read a PHY register on the SGMII
   11328  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11330  */
   11331 static int
   11332 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11333 {
   11334 	struct wm_softc *sc = device_private(dev);
   11335 	uint32_t i2ccmd;
   11336 	int i, rv;
   11337 
   11338 	if (sc->phy.acquire(sc)) {
   11339 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11340 		return 0;
   11341 	}
   11342 
   11343 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11344 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11345 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11346 
   11347 	/* Poll the ready bit */
   11348 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11349 		delay(50);
   11350 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11351 		if (i2ccmd & I2CCMD_READY)
   11352 			break;
   11353 	}
   11354 	if ((i2ccmd & I2CCMD_READY) == 0)
   11355 		device_printf(dev, "I2CCMD Read did not complete\n");
   11356 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11357 		device_printf(dev, "I2CCMD Error bit set\n");
   11358 
   11359 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11360 
   11361 	sc->phy.release(sc);
   11362 	return rv;
   11363 }
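
/*
 * Note on the byte order above: the I2CCMD data field carries the two
 * register bytes in I2C wire order, so the low-order byte of the PHY
 * register arrives in the upper half of the 16-bit field.  For example,
 * a register value of 0x1234 reads back as 0x3412 and is swapped into
 * host order; wm_sgmii_writereg() applies the inverse swap before the
 * write.
 */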
   11364 
   11365 /*
   11366  * wm_sgmii_writereg:	[mii interface function]
   11367  *
   11368  *	Write a PHY register on the SGMII.
   11369  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11371  */
   11372 static void
   11373 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11374 {
   11375 	struct wm_softc *sc = device_private(dev);
   11376 	uint32_t i2ccmd;
   11377 	int i;
   11378 	int swapdata;
   11379 
   11380 	if (sc->phy.acquire(sc) != 0) {
   11381 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11382 		return;
   11383 	}
   11384 	/* Swap the data bytes for the I2C interface */
   11385 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11386 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11387 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11388 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11389 
   11390 	/* Poll the ready bit */
   11391 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11392 		delay(50);
   11393 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11394 		if (i2ccmd & I2CCMD_READY)
   11395 			break;
   11396 	}
   11397 	if ((i2ccmd & I2CCMD_READY) == 0)
   11398 		device_printf(dev, "I2CCMD Write did not complete\n");
   11399 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11400 		device_printf(dev, "I2CCMD Error bit set\n");
   11401 
   11402 	sc->phy.release(sc);
   11403 }
   11404 
   11405 /* TBI related */
   11406 
   11407 static bool
   11408 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11409 {
   11410 	bool sig;
   11411 
   11412 	sig = ctrl & CTRL_SWDPIN(1);
   11413 
   11414 	/*
   11415 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11416 	 * detect a signal, 1 if they don't.
   11417 	 */
   11418 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11419 		sig = !sig;
   11420 
   11421 	return sig;
   11422 }
   11423 
   11424 /*
   11425  * wm_tbi_mediainit:
   11426  *
   11427  *	Initialize media for use on 1000BASE-X devices.
   11428  */
   11429 static void
   11430 wm_tbi_mediainit(struct wm_softc *sc)
   11431 {
   11432 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11433 	const char *sep = "";
   11434 
   11435 	if (sc->sc_type < WM_T_82543)
   11436 		sc->sc_tipg = TIPG_WM_DFLT;
   11437 	else
   11438 		sc->sc_tipg = TIPG_LG_DFLT;
   11439 
   11440 	sc->sc_tbi_serdes_anegticks = 5;
   11441 
   11442 	/* Initialize our media structures */
   11443 	sc->sc_mii.mii_ifp = ifp;
   11444 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11445 
   11446 	if ((sc->sc_type >= WM_T_82575)
   11447 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11448 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11449 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11450 	else
   11451 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11452 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11453 
   11454 	/*
   11455 	 * SWD Pins:
   11456 	 *
   11457 	 *	0 = Link LED (output)
   11458 	 *	1 = Loss Of Signal (input)
   11459 	 */
   11460 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11461 
   11462 	/* XXX Perhaps this is only for TBI */
   11463 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11464 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11465 
   11466 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11467 		sc->sc_ctrl &= ~CTRL_LRST;
   11468 
   11469 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11470 
   11471 #define	ADD(ss, mm, dd)							\
   11472 do {									\
   11473 	aprint_normal("%s%s", sep, ss);					\
   11474 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11475 	sep = ", ";							\
   11476 } while (/*CONSTCOND*/0)
   11477 
   11478 	aprint_normal_dev(sc->sc_dev, "");
   11479 
   11480 	if (sc->sc_type == WM_T_I354) {
   11481 		uint32_t status;
   11482 
   11483 		status = CSR_READ(sc, WMREG_STATUS);
   11484 		if (((status & STATUS_2P5_SKU) != 0)
   11485 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
   11487 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   11489 	} else if (sc->sc_type == WM_T_82545) {
   11490 		/* Only 82545 is LX (XXX except SFP) */
   11491 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11492 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11493 	} else {
   11494 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11495 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11496 	}
   11497 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11498 	aprint_normal("\n");
   11499 
   11500 #undef ADD
   11501 
   11502 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11503 }
   11504 
   11505 /*
   11506  * wm_tbi_mediachange:	[ifmedia interface function]
   11507  *
   11508  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11509  */
   11510 static int
   11511 wm_tbi_mediachange(struct ifnet *ifp)
   11512 {
   11513 	struct wm_softc *sc = ifp->if_softc;
   11514 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11515 	uint32_t status, ctrl;
   11516 	bool signal;
   11517 	int i;
   11518 
   11519 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11520 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11521 		/* XXX need some work for >= 82571 and < 82575 */
   11522 		if (sc->sc_type < WM_T_82575)
   11523 			return 0;
   11524 	}
   11525 
   11526 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11527 	    || (sc->sc_type >= WM_T_82575))
   11528 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11529 
   11530 	sc->sc_ctrl &= ~CTRL_LRST;
   11531 	sc->sc_txcw = TXCW_ANE;
   11532 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11533 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11534 	else if (ife->ifm_media & IFM_FDX)
   11535 		sc->sc_txcw |= TXCW_FD;
   11536 	else
   11537 		sc->sc_txcw |= TXCW_HD;
   11538 
   11539 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11540 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11541 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11543 		device_xname(sc->sc_dev), sc->sc_txcw));
   11544 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11545 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11546 	CSR_WRITE_FLUSH(sc);
   11547 	delay(1000);
   11548 
	ctrl = CSR_READ(sc, WMREG_CTRL);
   11550 	signal = wm_tbi_havesignal(sc, ctrl);
   11551 
   11552 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11553 		signal));
   11554 
   11555 	if (signal) {
   11556 		/* Have signal; wait for the link to come up. */
   11557 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11558 			delay(10000);
   11559 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11560 				break;
   11561 		}
   11562 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   11565 
   11566 		status = CSR_READ(sc, WMREG_STATUS);
   11567 		DPRINTF(WM_DEBUG_LINK,
   11568 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
   11570 		if (status & STATUS_LU) {
   11571 			/* Link is up. */
   11572 			DPRINTF(WM_DEBUG_LINK,
   11573 			    ("%s: LINK: set media -> link up %s\n",
   11574 				device_xname(sc->sc_dev),
   11575 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11576 
			/*
			 * NOTE: The hardware updates CTRL_TFCE and CTRL_RFCE
			 * automatically, so refresh our cached sc->sc_ctrl
			 * from the register.
			 */
   11581 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11582 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11583 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11584 			if (status & STATUS_FD)
   11585 				sc->sc_tctl |=
   11586 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11587 			else
   11588 				sc->sc_tctl |=
   11589 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11590 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11591 				sc->sc_fcrtl |= FCRTL_XONE;
   11592 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11593 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11594 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11595 			sc->sc_tbi_linkup = 1;
   11596 		} else {
   11597 			if (i == WM_LINKUP_TIMEOUT)
   11598 				wm_check_for_link(sc);
   11599 			/* Link is down. */
   11600 			DPRINTF(WM_DEBUG_LINK,
   11601 			    ("%s: LINK: set media -> link down\n",
   11602 				device_xname(sc->sc_dev)));
   11603 			sc->sc_tbi_linkup = 0;
   11604 		}
   11605 	} else {
   11606 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11607 			device_xname(sc->sc_dev)));
   11608 		sc->sc_tbi_linkup = 0;
   11609 	}
   11610 
   11611 	wm_tbi_serdes_set_linkled(sc);
   11612 
   11613 	return 0;
   11614 }
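
/*
 * A note on TXCW: the value programmed above is the transmit
 * configuration word for 802.3z autonegotiation.  TXCW_ANE enables
 * autonegotiation, TXCW_FD/TXCW_HD advertise full/half duplex, and the
 * SYM/ASYM pause bits advertise flow control; the link partner's code
 * word is later reflected in RXCW.
 */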
   11615 
   11616 /*
   11617  * wm_tbi_mediastatus:	[ifmedia interface function]
   11618  *
   11619  *	Get the current interface media status on a 1000BASE-X device.
   11620  */
   11621 static void
   11622 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11623 {
   11624 	struct wm_softc *sc = ifp->if_softc;
   11625 	uint32_t ctrl, status;
   11626 
   11627 	ifmr->ifm_status = IFM_AVALID;
   11628 	ifmr->ifm_active = IFM_ETHER;
   11629 
   11630 	status = CSR_READ(sc, WMREG_STATUS);
   11631 	if ((status & STATUS_LU) == 0) {
   11632 		ifmr->ifm_active |= IFM_NONE;
   11633 		return;
   11634 	}
   11635 
   11636 	ifmr->ifm_status |= IFM_ACTIVE;
   11637 	/* Only 82545 is LX */
   11638 	if (sc->sc_type == WM_T_82545)
   11639 		ifmr->ifm_active |= IFM_1000_LX;
   11640 	else
   11641 		ifmr->ifm_active |= IFM_1000_SX;
   11642 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11643 		ifmr->ifm_active |= IFM_FDX;
   11644 	else
   11645 		ifmr->ifm_active |= IFM_HDX;
   11646 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11647 	if (ctrl & CTRL_RFCE)
   11648 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11649 	if (ctrl & CTRL_TFCE)
   11650 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11651 }
   11652 
   11653 /* XXX TBI only */
   11654 static int
   11655 wm_check_for_link(struct wm_softc *sc)
   11656 {
   11657 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11658 	uint32_t rxcw;
   11659 	uint32_t ctrl;
   11660 	uint32_t status;
   11661 	bool signal;
   11662 
   11663 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11664 		device_xname(sc->sc_dev), __func__));
   11665 
   11666 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11667 		/* XXX need some work for >= 82571 */
   11668 		if (sc->sc_type >= WM_T_82571) {
   11669 			sc->sc_tbi_linkup = 1;
   11670 			return 0;
   11671 		}
   11672 	}
   11673 
   11674 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11675 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11676 	status = CSR_READ(sc, WMREG_STATUS);
   11677 	signal = wm_tbi_havesignal(sc, ctrl);
   11678 
   11679 	DPRINTF(WM_DEBUG_LINK,
   11680 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11681 		device_xname(sc->sc_dev), __func__, signal,
   11682 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11683 
	/*
	 * SWDPIN   LU RXCW
	 *	0    0	  0
	 *	0    0	  1	(should not happen)
	 *	0    1	  0	(should not happen)
	 *	0    1	  1	(should not happen)
	 *	1    0	  0	Disable autonegotiation and force link up
	 *	1    0	  1	Got /C/ ordered sets but no link yet
	 *	1    1	  0	(link is up)
	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
	 */
   11696 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11697 		DPRINTF(WM_DEBUG_LINK,
   11698 		    ("%s: %s: force linkup and fullduplex\n",
   11699 			device_xname(sc->sc_dev), __func__));
   11700 		sc->sc_tbi_linkup = 0;
   11701 		/* Disable auto-negotiation in the TXCW register */
   11702 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11703 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: The hardware has updated CTRL_TFCE and CTRL_RFCE
		 * automatically, so update sc->sc_ctrl from the value just
		 * read.
		 */
   11710 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11711 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11712 	} else if (((status & STATUS_LU) != 0)
   11713 	    && ((rxcw & RXCW_C) != 0)
   11714 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11715 		sc->sc_tbi_linkup = 1;
   11716 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11717 			device_xname(sc->sc_dev),
   11718 			__func__));
   11719 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11720 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11721 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   11722 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   11723 			device_xname(sc->sc_dev), __func__));
   11724 	} else {
   11725 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11726 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11727 			status));
   11728 	}
   11729 
   11730 	return 0;
   11731 }
   11732 
   11733 /*
   11734  * wm_tbi_tick:
   11735  *
   11736  *	Check the link on TBI devices.
   11737  *	This function acts as mii_tick().
   11738  */
   11739 static void
   11740 wm_tbi_tick(struct wm_softc *sc)
   11741 {
   11742 	struct mii_data *mii = &sc->sc_mii;
   11743 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11744 	uint32_t status;
   11745 
   11746 	KASSERT(WM_CORE_LOCKED(sc));
   11747 
   11748 	status = CSR_READ(sc, WMREG_STATUS);
   11749 
   11750 	/* XXX is this needed? */
   11751 	(void)CSR_READ(sc, WMREG_RXCW);
   11752 	(void)CSR_READ(sc, WMREG_CTRL);
   11753 
   11754 	/* set link status */
   11755 	if ((status & STATUS_LU) == 0) {
   11756 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11757 			device_xname(sc->sc_dev)));
   11758 		sc->sc_tbi_linkup = 0;
   11759 	} else if (sc->sc_tbi_linkup == 0) {
   11760 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11761 			device_xname(sc->sc_dev),
   11762 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11763 		sc->sc_tbi_linkup = 1;
   11764 		sc->sc_tbi_serdes_ticks = 0;
   11765 	}
   11766 
   11767 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11768 		goto setled;
   11769 
   11770 	if ((status & STATUS_LU) == 0) {
   11771 		sc->sc_tbi_linkup = 0;
   11772 		/* If the timer expired, retry autonegotiation */
   11773 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11774 		    && (++sc->sc_tbi_serdes_ticks
   11775 			>= sc->sc_tbi_serdes_anegticks)) {
   11776 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11777 			sc->sc_tbi_serdes_ticks = 0;
   11778 			/*
   11779 			 * Reset the link, and let autonegotiation do
   11780 			 * its thing
   11781 			 */
   11782 			sc->sc_ctrl |= CTRL_LRST;
   11783 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11784 			CSR_WRITE_FLUSH(sc);
   11785 			delay(1000);
   11786 			sc->sc_ctrl &= ~CTRL_LRST;
   11787 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11788 			CSR_WRITE_FLUSH(sc);
   11789 			delay(1000);
   11790 			CSR_WRITE(sc, WMREG_TXCW,
   11791 			    sc->sc_txcw & ~TXCW_ANE);
   11792 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11793 		}
   11794 	}
   11795 
   11796 setled:
   11797 	wm_tbi_serdes_set_linkled(sc);
   11798 }
   11799 
   11800 /* SERDES related */
   11801 static void
   11802 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11803 {
   11804 	uint32_t reg;
   11805 
   11806 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11807 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11808 		return;
   11809 
   11810 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11811 	reg |= PCS_CFG_PCS_EN;
   11812 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11813 
   11814 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11815 	reg &= ~CTRL_EXT_SWDPIN(3);
   11816 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11817 	CSR_WRITE_FLUSH(sc);
   11818 }
   11819 
   11820 static int
   11821 wm_serdes_mediachange(struct ifnet *ifp)
   11822 {
   11823 	struct wm_softc *sc = ifp->if_softc;
   11824 	bool pcs_autoneg = true; /* XXX */
   11825 	uint32_t ctrl_ext, pcs_lctl, reg;
   11826 
   11827 	/* XXX Currently, this function is not called on 8257[12] */
   11828 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11829 	    || (sc->sc_type >= WM_T_82575))
   11830 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11831 
   11832 	wm_serdes_power_up_link_82575(sc);
   11833 
   11834 	sc->sc_ctrl |= CTRL_SLU;
   11835 
   11836 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11837 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11838 
   11839 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11840 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11841 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11842 	case CTRL_EXT_LINK_MODE_SGMII:
   11843 		pcs_autoneg = true;
   11844 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11845 		break;
   11846 	case CTRL_EXT_LINK_MODE_1000KX:
   11847 		pcs_autoneg = false;
   11848 		/* FALLTHROUGH */
   11849 	default:
   11850 		if ((sc->sc_type == WM_T_82575)
   11851 		    || (sc->sc_type == WM_T_82576)) {
   11852 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11853 				pcs_autoneg = false;
   11854 		}
   11855 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11856 		    | CTRL_FRCFDX;
   11857 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11858 	}
   11859 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11860 
   11861 	if (pcs_autoneg) {
   11862 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11863 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11864 
   11865 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11866 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11867 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11868 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11869 	} else
   11870 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11871 
   11872 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11873 
   11875 	return 0;
   11876 }
   11877 
   11878 static void
   11879 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11880 {
   11881 	struct wm_softc *sc = ifp->if_softc;
   11882 	struct mii_data *mii = &sc->sc_mii;
   11883 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11884 	uint32_t pcs_adv, pcs_lpab, reg;
   11885 
   11886 	ifmr->ifm_status = IFM_AVALID;
   11887 	ifmr->ifm_active = IFM_ETHER;
   11888 
   11889 	/* Check PCS */
   11890 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11891 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11892 		ifmr->ifm_active |= IFM_NONE;
   11893 		sc->sc_tbi_linkup = 0;
   11894 		goto setled;
   11895 	}
   11896 
   11897 	sc->sc_tbi_linkup = 1;
   11898 	ifmr->ifm_status |= IFM_ACTIVE;
   11899 	if (sc->sc_type == WM_T_I354) {
   11900 		uint32_t status;
   11901 
   11902 		status = CSR_READ(sc, WMREG_STATUS);
   11903 		if (((status & STATUS_2P5_SKU) != 0)
   11904 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11905 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11906 		} else
   11907 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11908 	} else {
   11909 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11910 		case PCS_LSTS_SPEED_10:
   11911 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11912 			break;
   11913 		case PCS_LSTS_SPEED_100:
   11914 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11915 			break;
   11916 		case PCS_LSTS_SPEED_1000:
   11917 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11918 			break;
   11919 		default:
   11920 			device_printf(sc->sc_dev, "Unknown speed\n");
   11921 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11922 			break;
   11923 		}
   11924 	}
   11925 	if ((reg & PCS_LSTS_FDX) != 0)
   11926 		ifmr->ifm_active |= IFM_FDX;
   11927 	else
   11928 		ifmr->ifm_active |= IFM_HDX;
   11929 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11930 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11931 		/* Check flow */
   11932 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11933 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11934 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11935 			goto setled;
   11936 		}
   11937 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11938 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11939 		DPRINTF(WM_DEBUG_LINK,
   11940 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11941 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11942 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11943 			mii->mii_media_active |= IFM_FLOW
   11944 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11945 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11946 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11947 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11948 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11949 			mii->mii_media_active |= IFM_FLOW
   11950 			    | IFM_ETH_TXPAUSE;
   11951 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11952 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11953 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11954 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11955 			mii->mii_media_active |= IFM_FLOW
   11956 			    | IFM_ETH_RXPAUSE;
   11957 		}
   11958 	}
   11959 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11960 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11961 setled:
   11962 	wm_tbi_serdes_set_linkled(sc);
   11963 }
   11964 
   11965 /*
   11966  * wm_serdes_tick:
   11967  *
   11968  *	Check the link on serdes devices.
   11969  */
   11970 static void
   11971 wm_serdes_tick(struct wm_softc *sc)
   11972 {
   11973 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11974 	struct mii_data *mii = &sc->sc_mii;
   11975 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11976 	uint32_t reg;
   11977 
   11978 	KASSERT(WM_CORE_LOCKED(sc));
   11979 
   11980 	mii->mii_media_status = IFM_AVALID;
   11981 	mii->mii_media_active = IFM_ETHER;
   11982 
   11983 	/* Check PCS */
   11984 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11985 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11986 		mii->mii_media_status |= IFM_ACTIVE;
   11987 		sc->sc_tbi_linkup = 1;
   11988 		sc->sc_tbi_serdes_ticks = 0;
   11989 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11990 		if ((reg & PCS_LSTS_FDX) != 0)
   11991 			mii->mii_media_active |= IFM_FDX;
   11992 		else
   11993 			mii->mii_media_active |= IFM_HDX;
   11994 	} else {
   11995 		mii->mii_media_status |= IFM_NONE;
   11996 		sc->sc_tbi_linkup = 0;
   11997 		/* If the timer expired, retry autonegotiation */
   11998 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11999 		    && (++sc->sc_tbi_serdes_ticks
   12000 			>= sc->sc_tbi_serdes_anegticks)) {
   12001 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12002 			sc->sc_tbi_serdes_ticks = 0;
   12003 			/* XXX */
   12004 			wm_serdes_mediachange(ifp);
   12005 		}
   12006 	}
   12007 
   12008 	wm_tbi_serdes_set_linkled(sc);
   12009 }
   12010 
   12011 /* SFP related */
   12012 
   12013 static int
   12014 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12015 {
   12016 	uint32_t i2ccmd;
   12017 	int i;
   12018 
   12019 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12020 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12021 
   12022 	/* Poll the ready bit */
   12023 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12024 		delay(50);
   12025 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12026 		if (i2ccmd & I2CCMD_READY)
   12027 			break;
   12028 	}
   12029 	if ((i2ccmd & I2CCMD_READY) == 0)
   12030 		return -1;
   12031 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12032 		return -1;
   12033 
   12034 	*data = i2ccmd & 0x00ff;
   12035 
   12036 	return 0;
   12037 }
   12038 
   12039 static uint32_t
   12040 wm_sfp_get_media_type(struct wm_softc *sc)
   12041 {
   12042 	uint32_t ctrl_ext;
   12043 	uint8_t val = 0;
   12044 	int timeout = 3;
   12045 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12046 	int rv = -1;
   12047 
   12048 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12049 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12050 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12051 	CSR_WRITE_FLUSH(sc);
   12052 
   12053 	/* Read SFP module data */
   12054 	while (timeout) {
   12055 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12056 		if (rv == 0)
   12057 			break;
   12058 		delay(100*1000); /* XXX too big */
   12059 		timeout--;
   12060 	}
   12061 	if (rv != 0)
   12062 		goto out;
   12063 	switch (val) {
   12064 	case SFF_SFP_ID_SFF:
   12065 		aprint_normal_dev(sc->sc_dev,
   12066 		    "Module/Connector soldered to board\n");
   12067 		break;
   12068 	case SFF_SFP_ID_SFP:
   12069 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12070 		break;
   12071 	case SFF_SFP_ID_UNKNOWN:
   12072 		goto out;
   12073 	default:
   12074 		break;
   12075 	}
   12076 
   12077 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12078 	if (rv != 0) {
   12079 		goto out;
   12080 	}
   12081 
   12082 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12083 		mediatype = WM_MEDIATYPE_SERDES;
   12084 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12085 		sc->sc_flags |= WM_F_SGMII;
   12086 		mediatype = WM_MEDIATYPE_COPPER;
   12087 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12088 		sc->sc_flags |= WM_F_SGMII;
   12089 		mediatype = WM_MEDIATYPE_SERDES;
   12090 	}
   12091 
   12092 out:
   12093 	/* Restore I2C interface setting */
   12094 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12095 
   12096 	return mediatype;
   12097 }
   12098 
   12099 /*
   12100  * NVM related.
   12101  * Microwire, SPI (w/wo EERD) and Flash.
   12102  */
   12103 
   12104 /* Both spi and uwire */
   12105 
   12106 /*
   12107  * wm_eeprom_sendbits:
   12108  *
   12109  *	Send a series of bits to the EEPROM.
   12110  */
   12111 static void
   12112 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12113 {
   12114 	uint32_t reg;
   12115 	int x;
   12116 
   12117 	reg = CSR_READ(sc, WMREG_EECD);
   12118 
   12119 	for (x = nbits; x > 0; x--) {
   12120 		if (bits & (1U << (x - 1)))
   12121 			reg |= EECD_DI;
   12122 		else
   12123 			reg &= ~EECD_DI;
   12124 		CSR_WRITE(sc, WMREG_EECD, reg);
   12125 		CSR_WRITE_FLUSH(sc);
   12126 		delay(2);
   12127 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12128 		CSR_WRITE_FLUSH(sc);
   12129 		delay(2);
   12130 		CSR_WRITE(sc, WMREG_EECD, reg);
   12131 		CSR_WRITE_FLUSH(sc);
   12132 		delay(2);
   12133 	}
   12134 }
   12135 
   12136 /*
   12137  * wm_eeprom_recvbits:
   12138  *
   12139  *	Receive a series of bits from the EEPROM.
   12140  */
   12141 static void
   12142 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12143 {
   12144 	uint32_t reg, val;
   12145 	int x;
   12146 
   12147 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12148 
   12149 	val = 0;
   12150 	for (x = nbits; x > 0; x--) {
   12151 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12152 		CSR_WRITE_FLUSH(sc);
   12153 		delay(2);
   12154 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12155 			val |= (1U << (x - 1));
   12156 		CSR_WRITE(sc, WMREG_EECD, reg);
   12157 		CSR_WRITE_FLUSH(sc);
   12158 		delay(2);
   12159 	}
   12160 	*valp = val;
   12161 }
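
/*
 * Usage sketch: these two helpers implement the bit-banged EEPROM
 * protocol.  Each bit is driven onto (or sampled from) the data line
 * around an SK clock pulse with ~2us settle delays.  A complete
 * Microwire READ, as composed in wm_nvm_read_uwire() below, shifts out
 * the 3-bit READ opcode and the word address, then shifts in 16 data
 * bits.
 */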
   12162 
   12163 /* Microwire */
   12164 
   12165 /*
   12166  * wm_nvm_read_uwire:
   12167  *
   12168  *	Read a word from the EEPROM using the MicroWire protocol.
   12169  */
   12170 static int
   12171 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12172 {
   12173 	uint32_t reg, val;
   12174 	int i;
   12175 
   12176 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12177 		device_xname(sc->sc_dev), __func__));
   12178 
   12179 	if (sc->nvm.acquire(sc) != 0)
   12180 		return -1;
   12181 
   12182 	for (i = 0; i < wordcnt; i++) {
   12183 		/* Clear SK and DI. */
   12184 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12185 		CSR_WRITE(sc, WMREG_EECD, reg);
   12186 
   12187 		/*
   12188 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12189 		 * and Xen.
   12190 		 *
		 * We apply this workaround only to the 82540 because
		 * qemu's e1000 emulation acts as an 82540.
   12193 		 */
   12194 		if (sc->sc_type == WM_T_82540) {
   12195 			reg |= EECD_SK;
   12196 			CSR_WRITE(sc, WMREG_EECD, reg);
   12197 			reg &= ~EECD_SK;
   12198 			CSR_WRITE(sc, WMREG_EECD, reg);
   12199 			CSR_WRITE_FLUSH(sc);
   12200 			delay(2);
   12201 		}
   12202 		/* XXX: end of workaround */
   12203 
   12204 		/* Set CHIP SELECT. */
   12205 		reg |= EECD_CS;
   12206 		CSR_WRITE(sc, WMREG_EECD, reg);
   12207 		CSR_WRITE_FLUSH(sc);
   12208 		delay(2);
   12209 
   12210 		/* Shift in the READ command. */
   12211 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12212 
   12213 		/* Shift in address. */
   12214 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12215 
   12216 		/* Shift out the data. */
   12217 		wm_eeprom_recvbits(sc, &val, 16);
   12218 		data[i] = val & 0xffff;
   12219 
   12220 		/* Clear CHIP SELECT. */
   12221 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12222 		CSR_WRITE(sc, WMREG_EECD, reg);
   12223 		CSR_WRITE_FLUSH(sc);
   12224 		delay(2);
   12225 	}
   12226 
   12227 	sc->nvm.release(sc);
   12228 	return 0;
   12229 }
   12230 
   12231 /* SPI */
   12232 
   12233 /*
   12234  * Set SPI and FLASH related information from the EECD register.
   12235  * For 82541 and 82547, the word size is taken from EEPROM.
   12236  */
   12237 static int
   12238 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12239 {
   12240 	int size;
   12241 	uint32_t reg;
   12242 	uint16_t data;
   12243 
   12244 	reg = CSR_READ(sc, WMREG_EECD);
   12245 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12246 
   12247 	/* Read the size of NVM from EECD by default */
   12248 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12249 	switch (sc->sc_type) {
   12250 	case WM_T_82541:
   12251 	case WM_T_82541_2:
   12252 	case WM_T_82547:
   12253 	case WM_T_82547_2:
   12254 		/* Set dummy value to access EEPROM */
   12255 		sc->sc_nvm_wordsize = 64;
   12256 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12257 			aprint_error_dev(sc->sc_dev,
   12258 			    "%s: failed to read EEPROM size\n", __func__);
   12259 		}
   12260 		reg = data;
   12261 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12262 		if (size == 0)
   12263 			size = 6; /* 64 word size */
   12264 		else
   12265 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12266 		break;
   12267 	case WM_T_80003:
   12268 	case WM_T_82571:
   12269 	case WM_T_82572:
   12270 	case WM_T_82573: /* SPI case */
   12271 	case WM_T_82574: /* SPI case */
   12272 	case WM_T_82583: /* SPI case */
   12273 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12274 		if (size > 14)
   12275 			size = 14;
   12276 		break;
   12277 	case WM_T_82575:
   12278 	case WM_T_82576:
   12279 	case WM_T_82580:
   12280 	case WM_T_I350:
   12281 	case WM_T_I354:
   12282 	case WM_T_I210:
   12283 	case WM_T_I211:
   12284 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12285 		if (size > 15)
   12286 			size = 15;
   12287 		break;
   12288 	default:
   12289 		aprint_error_dev(sc->sc_dev,
   12290 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12291 		return -1;
   12293 	}
   12294 
   12295 	sc->sc_nvm_wordsize = 1 << size;
   12296 
   12297 	return 0;
   12298 }
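
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6): on an 82571
 * whose EECD size field reads 2, size becomes 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words.  The per-family clamps above
 * keep the computed exponent within what the hardware can address.
 */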
   12299 
   12300 /*
   12301  * wm_nvm_ready_spi:
   12302  *
   12303  *	Wait for a SPI EEPROM to be ready for commands.
   12304  */
   12305 static int
   12306 wm_nvm_ready_spi(struct wm_softc *sc)
   12307 {
   12308 	uint32_t val;
   12309 	int usec;
   12310 
   12311 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12312 		device_xname(sc->sc_dev), __func__));
   12313 
   12314 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12315 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12316 		wm_eeprom_recvbits(sc, &val, 8);
   12317 		if ((val & SPI_SR_RDY) == 0)
   12318 			break;
   12319 	}
   12320 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   12322 		return -1;
   12323 	}
   12324 	return 0;
   12325 }
   12326 
   12327 /*
   12328  * wm_nvm_read_spi:
   12329  *
 *	Read a word from the EEPROM using the SPI protocol.
   12331  */
   12332 static int
   12333 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12334 {
   12335 	uint32_t reg, val;
   12336 	int i;
   12337 	uint8_t opc;
   12338 	int rv = 0;
   12339 
   12340 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12341 		device_xname(sc->sc_dev), __func__));
   12342 
   12343 	if (sc->nvm.acquire(sc) != 0)
   12344 		return -1;
   12345 
   12346 	/* Clear SK and CS. */
   12347 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12348 	CSR_WRITE(sc, WMREG_EECD, reg);
   12349 	CSR_WRITE_FLUSH(sc);
   12350 	delay(2);
   12351 
   12352 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12353 		goto out;
   12354 
   12355 	/* Toggle CS to flush commands. */
   12356 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12357 	CSR_WRITE_FLUSH(sc);
   12358 	delay(2);
   12359 	CSR_WRITE(sc, WMREG_EECD, reg);
   12360 	CSR_WRITE_FLUSH(sc);
   12361 	delay(2);
   12362 
   12363 	opc = SPI_OPC_READ;
   12364 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12365 		opc |= SPI_OPC_A8;
   12366 
   12367 	wm_eeprom_sendbits(sc, opc, 8);
   12368 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12369 
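	/*
	 * The SPI EEPROM streams the low-order byte of each word first, so
	 * the byte that lands in the upper half of 'val' is the word's low
	 * byte; e.g. a stored word 0x12AB is received as 0xAB12 and swapped
	 * back to host order below.
	 */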
   12370 	for (i = 0; i < wordcnt; i++) {
   12371 		wm_eeprom_recvbits(sc, &val, 16);
   12372 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12373 	}
   12374 
   12375 	/* Raise CS and clear SK. */
   12376 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12377 	CSR_WRITE(sc, WMREG_EECD, reg);
   12378 	CSR_WRITE_FLUSH(sc);
   12379 	delay(2);
   12380 
   12381 out:
   12382 	sc->nvm.release(sc);
   12383 	return rv;
   12384 }
   12385 
   12386 /* Using with EERD */
   12387 
   12388 static int
   12389 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12390 {
   12391 	uint32_t attempts = 100000;
   12392 	uint32_t i, reg = 0;
   12393 	int32_t done = -1;
   12394 
   12395 	for (i = 0; i < attempts; i++) {
   12396 		reg = CSR_READ(sc, rw);
   12397 
   12398 		if (reg & EERD_DONE) {
   12399 			done = 0;
   12400 			break;
   12401 		}
   12402 		delay(5);
   12403 	}
   12404 
   12405 	return done;
   12406 }
   12407 
   12408 static int
   12409 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12410 {
   12411 	int i, eerd = 0;
   12412 	int rv = 0;
   12413 
   12414 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12415 		device_xname(sc->sc_dev), __func__));
   12416 
   12417 	if (sc->nvm.acquire(sc) != 0)
   12418 		return -1;
   12419 
   12420 	for (i = 0; i < wordcnt; i++) {
   12421 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12422 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12423 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12424 		if (rv != 0) {
   12425 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12427 			break;
   12428 		}
   12429 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12430 	}
   12431 
   12432 	sc->nvm.release(sc);
   12433 	return rv;
   12434 }
   12435 
   12436 /* Flash */
   12437 
   12438 static int
   12439 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12440 {
   12441 	uint32_t eecd;
   12442 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12443 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12444 	uint32_t nvm_dword = 0;
   12445 	uint8_t sig_byte = 0;
   12446 	int rv;
   12447 
   12448 	switch (sc->sc_type) {
   12449 	case WM_T_PCH_SPT:
   12450 	case WM_T_PCH_CNP:
   12451 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12452 		act_offset = ICH_NVM_SIG_WORD * 2;
   12453 
   12454 		/* set bank to 0 in case flash read fails. */
   12455 		*bank = 0;
   12456 
   12457 		/* Check bank 0 */
   12458 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12459 		if (rv != 0)
   12460 			return rv;
   12461 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12462 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12463 			*bank = 0;
   12464 			return 0;
   12465 		}
   12466 
   12467 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12471 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12472 			*bank = 1;
   12473 			return 0;
   12474 		}
   12475 		aprint_error_dev(sc->sc_dev,
   12476 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12477 		return -1;
   12478 	case WM_T_ICH8:
   12479 	case WM_T_ICH9:
   12480 		eecd = CSR_READ(sc, WMREG_EECD);
   12481 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12482 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12483 			return 0;
   12484 		}
   12485 		/* FALLTHROUGH */
   12486 	default:
   12487 		/* Default to 0 */
   12488 		*bank = 0;
   12489 
   12490 		/* Check bank 0 */
   12491 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12492 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12493 			*bank = 0;
   12494 			return 0;
   12495 		}
   12496 
   12497 		/* Check bank 1 */
   12498 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12499 		    &sig_byte);
   12500 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12501 			*bank = 1;
   12502 			return 0;
   12503 		}
   12504 	}
   12505 
   12506 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12507 		device_xname(sc->sc_dev)));
   12508 	return -1;
   12509 }
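
/*
 * A note on the bank detection above: each flash bank carries a
 * signature byte at word ICH_NVM_SIG_WORD, and a bank is considered
 * valid when (sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE.
 * Bank 0 is preferred; bank 1's signature sits bank1_offset bytes
 * further into the flash.
 */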
   12510 
   12511 /******************************************************************************
   12512  * This function does initial flash setup so that a new read/write/erase cycle
   12513  * can be started.
   12514  *
   12515  * sc - The pointer to the hw structure
   12516  ****************************************************************************/
   12517 static int32_t
   12518 wm_ich8_cycle_init(struct wm_softc *sc)
   12519 {
   12520 	uint16_t hsfsts;
   12521 	int32_t error = 1;
   12522 	int32_t i     = 0;
   12523 
   12524 	if (sc->sc_type >= WM_T_PCH_SPT)
   12525 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12526 	else
   12527 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12528 
	/* Check the Flash Descriptor Valid bit in the HW status */
   12530 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12531 		return error;
   12532 
	/* Clear FCERR and DAEL in the HW status by writing 1s */
   12535 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12536 
   12537 	if (sc->sc_type >= WM_T_PCH_SPT)
   12538 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12539 	else
   12540 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12541 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set by hardware reset so that it can be used to tell
	 * whether a cycle is in progress or has completed.  We should also
	 * have a software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that two threads' accesses to those bits
	 * are serialized and two threads cannot start a cycle at the same
	 * time.
	 */
   12552 
   12553 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * No cycle is currently running, so we can start a new one.
		 */
   12558 
   12559 		/* Begin by setting Flash Cycle Done. */
   12560 		hsfsts |= HSFSTS_DONE;
   12561 		if (sc->sc_type >= WM_T_PCH_SPT)
   12562 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12563 			    hsfsts & 0xffffUL);
   12564 		else
   12565 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12566 		error = 0;
   12567 	} else {
		/*
		 * Otherwise, poll for a while so the current cycle has a
		 * chance to end before we give up.
		 */
   12572 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12573 			if (sc->sc_type >= WM_T_PCH_SPT)
   12574 				hsfsts = ICH8_FLASH_READ32(sc,
   12575 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12576 			else
   12577 				hsfsts = ICH8_FLASH_READ16(sc,
   12578 				    ICH_FLASH_HSFSTS);
   12579 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12580 				error = 0;
   12581 				break;
   12582 			}
   12583 			delay(1);
   12584 		}
   12585 		if (error == 0) {
			/*
			 * The previous cycle finished before our timeout;
			 * now set Flash Cycle Done.
			 */
   12590 			hsfsts |= HSFSTS_DONE;
   12591 			if (sc->sc_type >= WM_T_PCH_SPT)
   12592 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12593 				    hsfsts & 0xffffUL);
   12594 			else
   12595 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12596 				    hsfsts);
   12597 		}
   12598 	}
   12599 	return error;
   12600 }
   12601 
   12602 /******************************************************************************
   12603  * This function starts a flash cycle and waits for its completion
   12604  *
   12605  * sc - The pointer to the hw structure
   12606  ****************************************************************************/
   12607 static int32_t
   12608 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12609 {
   12610 	uint16_t hsflctl;
   12611 	uint16_t hsfsts;
   12612 	int32_t error = 1;
   12613 	uint32_t i = 0;
   12614 
   12615 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12616 	if (sc->sc_type >= WM_T_PCH_SPT)
   12617 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12618 	else
   12619 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12620 	hsflctl |= HSFCTL_GO;
   12621 	if (sc->sc_type >= WM_T_PCH_SPT)
   12622 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12623 		    (uint32_t)hsflctl << 16);
   12624 	else
   12625 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12626 
   12627 	/* Wait till FDONE bit is set to 1 */
   12628 	do {
   12629 		if (sc->sc_type >= WM_T_PCH_SPT)
   12630 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12631 			    & 0xffffUL;
   12632 		else
   12633 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12634 		if (hsfsts & HSFSTS_DONE)
   12635 			break;
   12636 		delay(1);
   12637 		i++;
   12638 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12640 		error = 0;
   12641 
   12642 	return error;
   12643 }
   12644 
   12645 /******************************************************************************
   12646  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12647  *
   12648  * sc - The pointer to the hw structure
   12649  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12651  * data - Pointer to the word to store the value read.
   12652  *****************************************************************************/
   12653 static int32_t
   12654 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12655     uint32_t size, uint32_t *data)
   12656 {
   12657 	uint16_t hsfsts;
   12658 	uint16_t hsflctl;
   12659 	uint32_t flash_linear_address;
   12660 	uint32_t flash_data = 0;
   12661 	int32_t error = 1;
   12662 	int32_t count = 0;
   12663 
	if (size < 1 || size > 4 || data == NULL ||
   12665 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12666 		return error;
   12667 
   12668 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12669 	    sc->sc_ich8_flash_base;
   12670 
   12671 	do {
   12672 		delay(1);
   12673 		/* Steps */
   12674 		error = wm_ich8_cycle_init(sc);
   12675 		if (error)
   12676 			break;
   12677 
   12678 		if (sc->sc_type >= WM_T_PCH_SPT)
   12679 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12680 			    >> 16;
   12681 		else
   12682 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field is size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   12684 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12685 		    & HSFCTL_BCOUNT_MASK;
   12686 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12687 		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * On SPT, this register is in LAN memory space, not
			 * flash, so only 32-bit accesses are supported.
			 */
   12692 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12693 			    (uint32_t)hsflctl << 16);
   12694 		} else
   12695 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12696 
		/*
		 * Write the low 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 */
		/* TODO: maybe check the index against the size of the flash */
   12702 
   12703 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12704 
   12705 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12706 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read in the data from Flash
		 * Data0, least significant byte first.
		 */
   12713 		if (error == 0) {
   12714 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12715 			if (size == 1)
   12716 				*data = (uint8_t)(flash_data & 0x000000FF);
   12717 			else if (size == 2)
   12718 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12719 			else if (size == 4)
   12720 				*data = (uint32_t)flash_data;
   12721 			break;
   12722 		} else {
   12723 			/*
   12724 			 * If we've gotten here, then things are probably
   12725 			 * completely hosed, but if the error condition is
   12726 			 * detected, it won't hurt to give it another try...
   12727 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12728 			 */
   12729 			if (sc->sc_type >= WM_T_PCH_SPT)
   12730 				hsfsts = ICH8_FLASH_READ32(sc,
   12731 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12732 			else
   12733 				hsfsts = ICH8_FLASH_READ16(sc,
   12734 				    ICH_FLASH_HSFSTS);
   12735 
   12736 			if (hsfsts & HSFSTS_ERR) {
   12737 				/* Repeat for some time before giving up. */
   12738 				continue;
   12739 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12740 				break;
   12741 		}
   12742 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12743 
   12744 	return error;
   12745 }
   12746 
   12747 /******************************************************************************
   12748  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12749  *
   12750  * sc - pointer to wm_hw structure
   12751  * index - The index of the byte to read.
   12752  * data - Pointer to a byte to store the value read.
   12753  *****************************************************************************/
   12754 static int32_t
   12755 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12756 {
   12757 	int32_t status;
   12758 	uint32_t word = 0;
   12759 
   12760 	status = wm_read_ich8_data(sc, index, 1, &word);
   12761 	if (status == 0)
   12762 		*data = (uint8_t)word;
   12763 	else
   12764 		*data = 0;
   12765 
   12766 	return status;
   12767 }
   12768 
   12769 /******************************************************************************
   12770  * Reads a word from the NVM using the ICH8 flash access registers.
   12771  *
   12772  * sc - pointer to wm_hw structure
   12773  * index - The starting byte index of the word to read.
   12774  * data - Pointer to a word to store the value read.
   12775  *****************************************************************************/
   12776 static int32_t
   12777 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12778 {
   12779 	int32_t status;
   12780 	uint32_t word = 0;
   12781 
   12782 	status = wm_read_ich8_data(sc, index, 2, &word);
   12783 	if (status == 0)
   12784 		*data = (uint16_t)word;
   12785 	else
   12786 		*data = 0;
   12787 
   12788 	return status;
   12789 }
   12790 
   12791 /******************************************************************************
   12792  * Reads a dword from the NVM using the ICH8 flash access registers.
   12793  *
   12794  * sc - pointer to wm_hw structure
   12795  * index - The starting byte index of the word to read.
   12796  * data - Pointer to a word to store the value read.
   12797  *****************************************************************************/
   12798 static int32_t
   12799 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12800 {
   12801 	int32_t status;
   12802 
   12803 	status = wm_read_ich8_data(sc, index, 4, data);
   12804 	return status;
   12805 }
   12806 
   12807 /******************************************************************************
   12808  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12809  * register.
   12810  *
   12811  * sc - Struct containing variables accessed by shared code
   12812  * offset - offset of word in the EEPROM to read
   12813  * data - word read from the EEPROM
   12814  * words - number of words to read
   12815  *****************************************************************************/
   12816 static int
   12817 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12818 {
   12819 	int32_t	 rv = 0;
   12820 	uint32_t flash_bank = 0;
   12821 	uint32_t act_offset = 0;
   12822 	uint32_t bank_offset = 0;
   12823 	uint16_t word = 0;
   12824 	uint16_t i = 0;
   12825 
   12826 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12827 		device_xname(sc->sc_dev), __func__));
   12828 
   12829 	if (sc->nvm.acquire(sc) != 0)
   12830 		return -1;
   12831 
   12832 	/*
   12833 	 * We need to know which is the valid flash bank.  In the event
   12834 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12835 	 * managing flash_bank. So it cannot be trusted and needs
   12836 	 * to be updated with each read.
   12837 	 */
   12838 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12839 	if (rv) {
   12840 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12841 			device_xname(sc->sc_dev)));
   12842 		flash_bank = 0;
   12843 	}
   12844 
   12845 	/*
   12846 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12847 	 * size
   12848 	 */
   12849 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12850 
   12851 	for (i = 0; i < words; i++) {
   12852 		/* The NVM part needs a byte offset, hence * 2 */
   12853 		act_offset = bank_offset + ((offset + i) * 2);
   12854 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12855 		if (rv) {
   12856 			aprint_error_dev(sc->sc_dev,
   12857 			    "%s: failed to read NVM\n", __func__);
   12858 			break;
   12859 		}
   12860 		data[i] = word;
   12861 	}
   12862 
   12863 	sc->nvm.release(sc);
   12864 	return rv;
   12865 }
   12866 
   12867 /******************************************************************************
   12868  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12869  * register.
   12870  *
   12871  * sc - Struct containing variables accessed by shared code
   12872  * offset - offset of word in the EEPROM to read
   12873  * data - word read from the EEPROM
   12874  * words - number of words to read
   12875  *****************************************************************************/
   12876 static int
   12877 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12878 {
   12879 	int32_t	 rv = 0;
   12880 	uint32_t flash_bank = 0;
   12881 	uint32_t act_offset = 0;
   12882 	uint32_t bank_offset = 0;
   12883 	uint32_t dword = 0;
   12884 	uint16_t i = 0;
   12885 
   12886 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12887 		device_xname(sc->sc_dev), __func__));
   12888 
   12889 	if (sc->nvm.acquire(sc) != 0)
   12890 		return -1;
   12891 
   12892 	/*
   12893 	 * We need to know which is the valid flash bank.  In the event
   12894 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12895 	 * managing flash_bank. So it cannot be trusted and needs
   12896 	 * to be updated with each read.
   12897 	 */
   12898 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12899 	if (rv) {
   12900 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12901 			device_xname(sc->sc_dev)));
   12902 		flash_bank = 0;
   12903 	}
   12904 
   12905 	/*
    12906 	 * Adjust the offset if we're reading from bank 1; the bank
    12907 	 * size is kept in words, hence the * 2 to get a byte offset.
   12908 	 */
   12909 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12910 
   12911 	for (i = 0; i < words; i++) {
   12912 		/* The NVM part needs a byte offset, hence * 2 */
   12913 		act_offset = bank_offset + ((offset + i) * 2);
   12914 		/* but we must read dword aligned, so mask ... */
   12915 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12916 		if (rv) {
   12917 			aprint_error_dev(sc->sc_dev,
   12918 			    "%s: failed to read NVM\n", __func__);
   12919 			break;
   12920 		}
   12921 		/* ... and pick out low or high word */
   12922 		if ((act_offset & 0x2) == 0)
   12923 			data[i] = (uint16_t)(dword & 0xFFFF);
   12924 		else
   12925 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
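          		/*
          		 * Illustrative example: for act_offset 0x36 the dword
          		 * at 0x34 is fetched, and since (0x36 & 0x2) != 0 the
          		 * requested word is in bits 31:16.
          		 */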
   12926 	}
   12927 
   12928 	sc->nvm.release(sc);
   12929 	return rv;
   12930 }
   12931 
   12932 /* iNVM */
   12933 
   12934 static int
   12935 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12936 {
   12937 	int32_t	 rv = 0;
   12938 	uint32_t invm_dword;
   12939 	uint16_t i;
   12940 	uint8_t record_type, word_address;
   12941 
   12942 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12943 		device_xname(sc->sc_dev), __func__));
   12944 
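          	/*
          	 * The iNVM is an array of self-describing dword records.
          	 * Word-autoload records carry a word address plus the word's
          	 * data; CSR-autoload and RSA-key records are skipped by
          	 * advancing i past their payload dwords.
          	 */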
   12945 	for (i = 0; i < INVM_SIZE; i++) {
   12946 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12947 		/* Get record type */
   12948 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12949 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12950 			break;
   12951 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12952 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12953 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12954 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12955 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12956 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12957 			if (word_address == address) {
   12958 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12959 				rv = 0;
   12960 				break;
   12961 			}
   12962 		}
   12963 	}
   12964 
   12965 	return rv;
   12966 }
   12967 
   12968 static int
   12969 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12970 {
   12971 	int rv = 0;
   12972 	int i;
   12973 
   12974 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12975 		device_xname(sc->sc_dev), __func__));
   12976 
   12977 	if (sc->nvm.acquire(sc) != 0)
   12978 		return -1;
   12979 
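          	/*
          	 * Only a handful of words exist in the iNVM.  For known
          	 * configuration words that are absent, a hardware default
          	 * value is substituted and the read still succeeds.
          	 */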
   12980 	for (i = 0; i < words; i++) {
   12981 		switch (offset + i) {
   12982 		case NVM_OFF_MACADDR:
   12983 		case NVM_OFF_MACADDR1:
   12984 		case NVM_OFF_MACADDR2:
   12985 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12986 			if (rv != 0) {
   12987 				data[i] = 0xffff;
   12988 				rv = -1;
   12989 			}
   12990 			break;
   12991 		case NVM_OFF_CFG2:
   12992 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12993 			if (rv != 0) {
   12994 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12995 				rv = 0;
   12996 			}
   12997 			break;
   12998 		case NVM_OFF_CFG4:
   12999 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13000 			if (rv != 0) {
   13001 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13002 				rv = 0;
   13003 			}
   13004 			break;
   13005 		case NVM_OFF_LED_1_CFG:
   13006 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13007 			if (rv != 0) {
   13008 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13009 				rv = 0;
   13010 			}
   13011 			break;
   13012 		case NVM_OFF_LED_0_2_CFG:
   13013 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13014 			if (rv != 0) {
   13015 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13016 				rv = 0;
   13017 			}
   13018 			break;
   13019 		case NVM_OFF_ID_LED_SETTINGS:
   13020 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13021 			if (rv != 0) {
   13022 				*data = ID_LED_RESERVED_FFFF;
   13023 				rv = 0;
   13024 			}
   13025 			break;
   13026 		default:
   13027 			DPRINTF(WM_DEBUG_NVM,
   13028 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13029 			*data = NVM_RESERVED_WORD;
   13030 			break;
   13031 		}
   13032 	}
   13033 
   13034 	sc->nvm.release(sc);
   13035 	return rv;
   13036 }
   13037 
    13038 /* Locking, NVM type detection, checksum validation, version and read */
   13039 
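          /*
           * Return 1 if the NVM is an on-board EEPROM, or 0 if EECD indicates
           * that an (off-chip) Flash part is used instead.  Only the
           * 82573/82574/82583 can have the Flash variant; all other types are
           * treated as EEPROM.
           */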
   13040 static int
   13041 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13042 {
   13043 	uint32_t eecd = 0;
   13044 
   13045 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13046 	    || sc->sc_type == WM_T_82583) {
   13047 		eecd = CSR_READ(sc, WMREG_EECD);
   13048 
   13049 		/* Isolate bits 15 & 16 */
   13050 		eecd = ((eecd >> 15) & 0x03);
   13051 
   13052 		/* If both bits are set, device is Flash type */
   13053 		if (eecd == 0x03)
   13054 			return 0;
   13055 	}
   13056 	return 1;
   13057 }
   13058 
   13059 static int
   13060 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13061 {
   13062 	uint32_t eec;
   13063 
   13064 	eec = CSR_READ(sc, WMREG_EEC);
   13065 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13066 		return 1;
   13067 
   13068 	return 0;
   13069 }
   13070 
   13071 /*
   13072  * wm_nvm_validate_checksum
   13073  *
   13074  * The checksum is defined as the sum of the first 64 (16 bit) words.
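           * That sum, taken over words 0x00-0x3f including the checksum word
           * itself, is expected to equal NVM_CHECKSUM (0xbaba).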
   13075  */
   13076 static int
   13077 wm_nvm_validate_checksum(struct wm_softc *sc)
   13078 {
   13079 	uint16_t checksum;
   13080 	uint16_t eeprom_data;
   13081 #ifdef WM_DEBUG
   13082 	uint16_t csum_wordaddr, valid_checksum;
   13083 #endif
   13084 	int i;
   13085 
   13086 	checksum = 0;
   13087 
   13088 	/* Don't check for I211 */
   13089 	if (sc->sc_type == WM_T_I211)
   13090 		return 0;
   13091 
   13092 #ifdef WM_DEBUG
   13093 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13094 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13095 		csum_wordaddr = NVM_OFF_COMPAT;
   13096 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13097 	} else {
   13098 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13099 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13100 	}
   13101 
    13102 	/* Check the checksum-valid word and dump the EEPROM image */
   13103 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13104 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13105 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13106 		/* XXX PCH_SPT? */
   13107 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13108 		if ((eeprom_data & valid_checksum) == 0) {
   13109 			DPRINTF(WM_DEBUG_NVM,
   13110 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13111 				device_xname(sc->sc_dev), eeprom_data,
   13112 				    valid_checksum));
   13113 		}
   13114 	}
   13115 
   13116 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13117 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13118 		for (i = 0; i < NVM_SIZE; i++) {
   13119 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13120 				printf("XXXX ");
   13121 			else
   13122 				printf("%04hx ", eeprom_data);
   13123 			if (i % 8 == 7)
   13124 				printf("\n");
   13125 		}
   13126 	}
   13127 
   13128 #endif /* WM_DEBUG */
   13129 
   13130 	for (i = 0; i < NVM_SIZE; i++) {
   13131 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13132 			return 1;
   13133 		checksum += eeprom_data;
   13134 	}
   13135 
   13136 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13137 #ifdef WM_DEBUG
   13138 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13139 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13140 #endif
   13141 	}
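          	/*
          	 * Note that a mismatch is only reported (and only under
          	 * WM_DEBUG); it is deliberately not treated as fatal, so
          	 * adapters whose image fails the check keep working.
          	 */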
   13142 
   13143 	return 0;
   13144 }
   13145 
   13146 static void
   13147 wm_nvm_version_invm(struct wm_softc *sc)
   13148 {
   13149 	uint32_t dword;
   13150 
   13151 	/*
    13152 	 * Linux's code to decode the version is very strange, so we
    13153 	 * don't follow that algorithm and simply use word 61 as the
    13154 	 * document describes.  Perhaps it's not perfect, though...
   13155 	 *
   13156 	 * Example:
   13157 	 *
   13158 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13159 	 */
   13160 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13161 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13162 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13163 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13164 }
   13165 
   13166 static void
   13167 wm_nvm_version(struct wm_softc *sc)
   13168 {
   13169 	uint16_t major, minor, build, patch;
   13170 	uint16_t uid0, uid1;
   13171 	uint16_t nvm_data;
   13172 	uint16_t off;
   13173 	bool check_version = false;
   13174 	bool check_optionrom = false;
   13175 	bool have_build = false;
   13176 	bool have_uid = true;
   13177 
   13178 	/*
   13179 	 * Version format:
   13180 	 *
   13181 	 * XYYZ
   13182 	 * X0YZ
   13183 	 * X0YY
   13184 	 *
   13185 	 * Example:
   13186 	 *
   13187 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13188 	 *	82571	0x50a6	5.10.6?
   13189 	 *	82572	0x506a	5.6.10?
   13190 	 *	82572EI	0x5069	5.6.9?
   13191 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13192 	 *		0x2013	2.1.3?
    13193 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   13194 	 */
   13195 
   13196 	/*
   13197 	 * XXX
    13198 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13199 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13200 	 */
   13201 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13202 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13203 		have_uid = false;
   13204 
   13205 	switch (sc->sc_type) {
   13206 	case WM_T_82571:
   13207 	case WM_T_82572:
   13208 	case WM_T_82574:
   13209 	case WM_T_82583:
   13210 		check_version = true;
   13211 		check_optionrom = true;
   13212 		have_build = true;
   13213 		break;
   13214 	case WM_T_82575:
   13215 	case WM_T_82576:
   13216 	case WM_T_82580:
   13217 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13218 			check_version = true;
   13219 		break;
   13220 	case WM_T_I211:
   13221 		wm_nvm_version_invm(sc);
   13222 		have_uid = false;
   13223 		goto printver;
   13224 	case WM_T_I210:
   13225 		if (!wm_nvm_flash_presence_i210(sc)) {
   13226 			wm_nvm_version_invm(sc);
   13227 			have_uid = false;
   13228 			goto printver;
   13229 		}
   13230 		/* FALLTHROUGH */
   13231 	case WM_T_I350:
   13232 	case WM_T_I354:
   13233 		check_version = true;
   13234 		check_optionrom = true;
   13235 		break;
   13236 	default:
   13237 		return;
   13238 	}
   13239 	if (check_version
   13240 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13241 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13242 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13243 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13244 			build = nvm_data & NVM_BUILD_MASK;
   13245 			have_build = true;
   13246 		} else
   13247 			minor = nvm_data & 0x00ff;
   13248 
   13249 		/* Decimal */
   13250 		minor = (minor / 16) * 10 + (minor % 16);
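          		/* e.g. a BCD-style raw minor of 0x16 becomes decimal 16 */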
   13251 		sc->sc_nvm_ver_major = major;
   13252 		sc->sc_nvm_ver_minor = minor;
   13253 
   13254 printver:
   13255 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13256 		    sc->sc_nvm_ver_minor);
   13257 		if (have_build) {
   13258 			sc->sc_nvm_ver_build = build;
   13259 			aprint_verbose(".%d", build);
   13260 		}
   13261 	}
   13262 
    13263 	/* Assume the Option ROM area is above NVM_SIZE */
   13264 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13265 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13266 		/* Option ROM Version */
   13267 		if ((off != 0x0000) && (off != 0xffff)) {
   13268 			int rv;
   13269 
   13270 			off += NVM_COMBO_VER_OFF;
   13271 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13272 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13273 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13274 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13275 				/* 16bits */
   13276 				major = uid0 >> 8;
   13277 				build = (uid0 << 8) | (uid1 >> 8);
   13278 				patch = uid1 & 0x00ff;
   13279 				aprint_verbose(", option ROM Version %d.%d.%d",
   13280 				    major, build, patch);
   13281 			}
   13282 		}
   13283 	}
   13284 
   13285 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13286 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13287 }
   13288 
   13289 /*
   13290  * wm_nvm_read:
   13291  *
   13292  *	Read data from the serial EEPROM.
   13293  */
   13294 static int
   13295 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13296 {
   13297 	int rv;
   13298 
   13299 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13300 		device_xname(sc->sc_dev), __func__));
   13301 
   13302 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13303 		return -1;
   13304 
   13305 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13306 
   13307 	return rv;
   13308 }
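
          /*
           * Typical call (illustrative):
           *
           *	uint16_t cfg2;
           *
           *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) == 0)
           *		... cfg2 now holds NVM word NVM_OFF_CFG2 ...
           */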
   13309 
   13310 /*
   13311  * Hardware semaphores.
    13312  * Very complex...
   13313  */
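
          /*
           * Several mechanisms appear below, depending on the chip generation:
           * the EECD request/grant bits, the SWSM SMBI/SWESMBI semaphore, the
           * SW_FW_SYNC ownership register, the EXTCNFCTR MDIO ownership bit
           * and, on ICH/PCH parts, plain software mutexes.
           */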
   13314 
   13315 static int
   13316 wm_get_null(struct wm_softc *sc)
   13317 {
   13318 
   13319 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13320 		device_xname(sc->sc_dev), __func__));
   13321 	return 0;
   13322 }
   13323 
   13324 static void
   13325 wm_put_null(struct wm_softc *sc)
   13326 {
   13327 
   13328 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13329 		device_xname(sc->sc_dev), __func__));
   13330 	return;
   13331 }
   13332 
   13333 static int
   13334 wm_get_eecd(struct wm_softc *sc)
   13335 {
   13336 	uint32_t reg;
   13337 	int x;
   13338 
   13339 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13340 		device_xname(sc->sc_dev), __func__));
   13341 
   13342 	reg = CSR_READ(sc, WMREG_EECD);
   13343 
   13344 	/* Request EEPROM access. */
   13345 	reg |= EECD_EE_REQ;
   13346 	CSR_WRITE(sc, WMREG_EECD, reg);
   13347 
    13348 	/* ... and wait for it to be granted. */
   13349 	for (x = 0; x < 1000; x++) {
   13350 		reg = CSR_READ(sc, WMREG_EECD);
   13351 		if (reg & EECD_EE_GNT)
   13352 			break;
   13353 		delay(5);
   13354 	}
   13355 	if ((reg & EECD_EE_GNT) == 0) {
   13356 		aprint_error_dev(sc->sc_dev,
   13357 		    "could not acquire EEPROM GNT\n");
   13358 		reg &= ~EECD_EE_REQ;
   13359 		CSR_WRITE(sc, WMREG_EECD, reg);
   13360 		return -1;
   13361 	}
   13362 
   13363 	return 0;
   13364 }
   13365 
   13366 static void
   13367 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13368 {
   13369 
   13370 	*eecd |= EECD_SK;
   13371 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13372 	CSR_WRITE_FLUSH(sc);
   13373 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13374 		delay(1);
   13375 	else
   13376 		delay(50);
   13377 }
   13378 
   13379 static void
   13380 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13381 {
   13382 
   13383 	*eecd &= ~EECD_SK;
   13384 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13385 	CSR_WRITE_FLUSH(sc);
   13386 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13387 		delay(1);
   13388 	else
   13389 		delay(50);
   13390 }
   13391 
   13392 static void
   13393 wm_put_eecd(struct wm_softc *sc)
   13394 {
   13395 	uint32_t reg;
   13396 
   13397 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13398 		device_xname(sc->sc_dev), __func__));
   13399 
   13400 	/* Stop nvm */
   13401 	reg = CSR_READ(sc, WMREG_EECD);
   13402 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13403 		/* Pull CS high */
   13404 		reg |= EECD_CS;
   13405 		wm_nvm_eec_clock_lower(sc, &reg);
   13406 	} else {
   13407 		/* CS on Microwire is active-high */
   13408 		reg &= ~(EECD_CS | EECD_DI);
   13409 		CSR_WRITE(sc, WMREG_EECD, reg);
   13410 		wm_nvm_eec_clock_raise(sc, &reg);
   13411 		wm_nvm_eec_clock_lower(sc, &reg);
   13412 	}
   13413 
   13414 	reg = CSR_READ(sc, WMREG_EECD);
   13415 	reg &= ~EECD_EE_REQ;
   13416 	CSR_WRITE(sc, WMREG_EECD, reg);
   13417 
   13418 	return;
   13419 }
   13420 
   13421 /*
   13422  * Get hardware semaphore.
   13423  * Same as e1000_get_hw_semaphore_generic()
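           * SMBI is taken first (hardware sets the bit as a side effect of
           * the read when it was clear), then SWESMBI is claimed by writing
           * it and reading it back to confirm that the write stuck.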
   13424  */
   13425 static int
   13426 wm_get_swsm_semaphore(struct wm_softc *sc)
   13427 {
   13428 	int32_t timeout;
   13429 	uint32_t swsm;
   13430 
   13431 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13432 		device_xname(sc->sc_dev), __func__));
   13433 	KASSERT(sc->sc_nvm_wordsize > 0);
   13434 
   13435 retry:
   13436 	/* Get the SW semaphore. */
   13437 	timeout = sc->sc_nvm_wordsize + 1;
   13438 	while (timeout) {
   13439 		swsm = CSR_READ(sc, WMREG_SWSM);
   13440 
   13441 		if ((swsm & SWSM_SMBI) == 0)
   13442 			break;
   13443 
   13444 		delay(50);
   13445 		timeout--;
   13446 	}
   13447 
   13448 	if (timeout == 0) {
   13449 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13450 			/*
   13451 			 * In rare circumstances, the SW semaphore may already
   13452 			 * be held unintentionally. Clear the semaphore once
   13453 			 * before giving up.
   13454 			 */
   13455 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13456 			wm_put_swsm_semaphore(sc);
   13457 			goto retry;
   13458 		}
   13459 		aprint_error_dev(sc->sc_dev,
   13460 		    "could not acquire SWSM SMBI\n");
   13461 		return 1;
   13462 	}
   13463 
   13464 	/* Get the FW semaphore. */
   13465 	timeout = sc->sc_nvm_wordsize + 1;
   13466 	while (timeout) {
   13467 		swsm = CSR_READ(sc, WMREG_SWSM);
   13468 		swsm |= SWSM_SWESMBI;
   13469 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13470 		/* If we managed to set the bit we got the semaphore. */
   13471 		swsm = CSR_READ(sc, WMREG_SWSM);
   13472 		if (swsm & SWSM_SWESMBI)
   13473 			break;
   13474 
   13475 		delay(50);
   13476 		timeout--;
   13477 	}
   13478 
   13479 	if (timeout == 0) {
   13480 		aprint_error_dev(sc->sc_dev,
   13481 		    "could not acquire SWSM SWESMBI\n");
   13482 		/* Release semaphores */
   13483 		wm_put_swsm_semaphore(sc);
   13484 		return 1;
   13485 	}
   13486 	return 0;
   13487 }
   13488 
   13489 /*
   13490  * Put hardware semaphore.
   13491  * Same as e1000_put_hw_semaphore_generic()
   13492  */
   13493 static void
   13494 wm_put_swsm_semaphore(struct wm_softc *sc)
   13495 {
   13496 	uint32_t swsm;
   13497 
   13498 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13499 		device_xname(sc->sc_dev), __func__));
   13500 
   13501 	swsm = CSR_READ(sc, WMREG_SWSM);
   13502 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13503 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13504 }
   13505 
   13506 /*
   13507  * Get SW/FW semaphore.
   13508  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
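           * The low half of SW_FW_SYNC holds the software ownership bits and
           * the high half the firmware ones; a resource is free only when
           * both its swmask and fwmask bits read clear.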
   13509  */
   13510 static int
   13511 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13512 {
   13513 	uint32_t swfw_sync;
   13514 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13515 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13516 	int timeout;
   13517 
   13518 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13519 		device_xname(sc->sc_dev), __func__));
   13520 
   13521 	if (sc->sc_type == WM_T_80003)
   13522 		timeout = 50;
   13523 	else
   13524 		timeout = 200;
   13525 
   13526 	while (timeout) {
   13527 		if (wm_get_swsm_semaphore(sc)) {
   13528 			aprint_error_dev(sc->sc_dev,
   13529 			    "%s: failed to get semaphore\n",
   13530 			    __func__);
   13531 			return 1;
   13532 		}
   13533 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13534 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13535 			swfw_sync |= swmask;
   13536 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13537 			wm_put_swsm_semaphore(sc);
   13538 			return 0;
   13539 		}
   13540 		wm_put_swsm_semaphore(sc);
   13541 		delay(5000);
   13542 		timeout--;
   13543 	}
   13544 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13545 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13546 	return 1;
   13547 }
   13548 
   13549 static void
   13550 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13551 {
   13552 	uint32_t swfw_sync;
   13553 
   13554 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13555 		device_xname(sc->sc_dev), __func__));
   13556 
   13557 	while (wm_get_swsm_semaphore(sc) != 0)
   13558 		continue;
   13559 
   13560 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13561 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13562 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13563 
   13564 	wm_put_swsm_semaphore(sc);
   13565 }
   13566 
   13567 static int
   13568 wm_get_nvm_80003(struct wm_softc *sc)
   13569 {
   13570 	int rv;
   13571 
   13572 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13573 		device_xname(sc->sc_dev), __func__));
   13574 
   13575 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13576 		aprint_error_dev(sc->sc_dev,
   13577 		    "%s: failed to get semaphore(SWFW)\n",
   13578 		    __func__);
   13579 		return rv;
   13580 	}
   13581 
   13582 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13583 	    && (rv = wm_get_eecd(sc)) != 0) {
   13584 		aprint_error_dev(sc->sc_dev,
   13585 		    "%s: failed to get semaphore(EECD)\n",
   13586 		    __func__);
   13587 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13588 		return rv;
   13589 	}
   13590 
   13591 	return 0;
   13592 }
   13593 
   13594 static void
   13595 wm_put_nvm_80003(struct wm_softc *sc)
   13596 {
   13597 
   13598 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13599 		device_xname(sc->sc_dev), __func__));
   13600 
   13601 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13602 		wm_put_eecd(sc);
   13603 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13604 }
   13605 
   13606 static int
   13607 wm_get_nvm_82571(struct wm_softc *sc)
   13608 {
   13609 	int rv;
   13610 
   13611 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13612 		device_xname(sc->sc_dev), __func__));
   13613 
   13614 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13615 		return rv;
   13616 
   13617 	switch (sc->sc_type) {
   13618 	case WM_T_82573:
   13619 		break;
   13620 	default:
   13621 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13622 			rv = wm_get_eecd(sc);
   13623 		break;
   13624 	}
   13625 
   13626 	if (rv != 0) {
   13627 		aprint_error_dev(sc->sc_dev,
   13628 		    "%s: failed to get semaphore\n",
   13629 		    __func__);
   13630 		wm_put_swsm_semaphore(sc);
   13631 	}
   13632 
   13633 	return rv;
   13634 }
   13635 
   13636 static void
   13637 wm_put_nvm_82571(struct wm_softc *sc)
   13638 {
   13639 
   13640 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13641 		device_xname(sc->sc_dev), __func__));
   13642 
   13643 	switch (sc->sc_type) {
   13644 	case WM_T_82573:
   13645 		break;
   13646 	default:
   13647 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13648 			wm_put_eecd(sc);
   13649 		break;
   13650 	}
   13651 
   13652 	wm_put_swsm_semaphore(sc);
   13653 }
   13654 
   13655 static int
   13656 wm_get_phy_82575(struct wm_softc *sc)
   13657 {
   13658 
   13659 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13660 		device_xname(sc->sc_dev), __func__));
   13661 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13662 }
   13663 
   13664 static void
   13665 wm_put_phy_82575(struct wm_softc *sc)
   13666 {
   13667 
   13668 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13669 		device_xname(sc->sc_dev), __func__));
   13670 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13671 }
   13672 
   13673 static int
   13674 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13675 {
   13676 	uint32_t ext_ctrl;
    13677 	int timeout;
   13678 
   13679 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13680 		device_xname(sc->sc_dev), __func__));
   13681 
   13682 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13683 	for (timeout = 0; timeout < 200; timeout++) {
   13684 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13685 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13686 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13687 
   13688 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13689 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13690 			return 0;
   13691 		delay(5000);
   13692 	}
   13693 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13694 	    device_xname(sc->sc_dev), ext_ctrl);
   13695 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13696 	return 1;
   13697 }
   13698 
   13699 static void
   13700 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13701 {
   13702 	uint32_t ext_ctrl;
   13703 
   13704 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13705 		device_xname(sc->sc_dev), __func__));
   13706 
   13707 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13708 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13709 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13710 
   13711 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13712 }
   13713 
   13714 static int
   13715 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13716 {
   13717 	uint32_t ext_ctrl;
   13718 	int timeout;
   13719 
   13720 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13721 		device_xname(sc->sc_dev), __func__));
   13722 	mutex_enter(sc->sc_ich_phymtx);
   13723 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13724 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13725 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13726 			break;
   13727 		delay(1000);
   13728 	}
   13729 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13730 		printf("%s: SW has already locked the resource\n",
   13731 		    device_xname(sc->sc_dev));
   13732 		goto out;
   13733 	}
   13734 
   13735 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13736 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13737 	for (timeout = 0; timeout < 1000; timeout++) {
   13738 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13739 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13740 			break;
   13741 		delay(1000);
   13742 	}
   13743 	if (timeout >= 1000) {
   13744 		printf("%s: failed to acquire semaphore\n",
   13745 		    device_xname(sc->sc_dev));
   13746 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13747 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13748 		goto out;
   13749 	}
   13750 	return 0;
   13751 
   13752 out:
   13753 	mutex_exit(sc->sc_ich_phymtx);
   13754 	return 1;
   13755 }
   13756 
   13757 static void
   13758 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13759 {
   13760 	uint32_t ext_ctrl;
   13761 
   13762 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13763 		device_xname(sc->sc_dev), __func__));
   13764 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13765 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13766 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13767 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13768 	} else {
   13769 		printf("%s: Semaphore unexpectedly released\n",
   13770 		    device_xname(sc->sc_dev));
   13771 	}
   13772 
   13773 	mutex_exit(sc->sc_ich_phymtx);
   13774 }
   13775 
   13776 static int
   13777 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13778 {
   13779 
   13780 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13781 		device_xname(sc->sc_dev), __func__));
   13782 	mutex_enter(sc->sc_ich_nvmmtx);
   13783 
   13784 	return 0;
   13785 }
   13786 
   13787 static void
   13788 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13789 {
   13790 
   13791 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13792 		device_xname(sc->sc_dev), __func__));
   13793 	mutex_exit(sc->sc_ich_nvmmtx);
   13794 }
   13795 
   13796 static int
   13797 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13798 {
   13799 	int i = 0;
   13800 	uint32_t reg;
   13801 
   13802 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13803 		device_xname(sc->sc_dev), __func__));
   13804 
   13805 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13806 	do {
   13807 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13808 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13809 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13810 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13811 			break;
   13812 		delay(2*1000);
   13813 		i++;
   13814 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13815 
   13816 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13817 		wm_put_hw_semaphore_82573(sc);
   13818 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13819 		    device_xname(sc->sc_dev));
   13820 		return -1;
   13821 	}
   13822 
   13823 	return 0;
   13824 }
   13825 
   13826 static void
   13827 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13828 {
   13829 	uint32_t reg;
   13830 
   13831 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13832 		device_xname(sc->sc_dev), __func__));
   13833 
   13834 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13835 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13836 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13837 }
   13838 
   13839 /*
   13840  * Management mode and power management related subroutines.
   13841  * BMC, AMT, suspend/resume and EEE.
   13842  */
   13843 
   13844 #ifdef WM_WOL
   13845 static int
   13846 wm_check_mng_mode(struct wm_softc *sc)
   13847 {
   13848 	int rv;
   13849 
   13850 	switch (sc->sc_type) {
   13851 	case WM_T_ICH8:
   13852 	case WM_T_ICH9:
   13853 	case WM_T_ICH10:
   13854 	case WM_T_PCH:
   13855 	case WM_T_PCH2:
   13856 	case WM_T_PCH_LPT:
   13857 	case WM_T_PCH_SPT:
   13858 	case WM_T_PCH_CNP:
   13859 		rv = wm_check_mng_mode_ich8lan(sc);
   13860 		break;
   13861 	case WM_T_82574:
   13862 	case WM_T_82583:
   13863 		rv = wm_check_mng_mode_82574(sc);
   13864 		break;
   13865 	case WM_T_82571:
   13866 	case WM_T_82572:
   13867 	case WM_T_82573:
   13868 	case WM_T_80003:
   13869 		rv = wm_check_mng_mode_generic(sc);
   13870 		break;
   13871 	default:
    13872 		/* nothing to do */
   13873 		rv = 0;
   13874 		break;
   13875 	}
   13876 
   13877 	return rv;
   13878 }
   13879 
   13880 static int
   13881 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13882 {
   13883 	uint32_t fwsm;
   13884 
   13885 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13886 
   13887 	if (((fwsm & FWSM_FW_VALID) != 0)
   13888 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13889 		return 1;
   13890 
   13891 	return 0;
   13892 }
   13893 
   13894 static int
   13895 wm_check_mng_mode_82574(struct wm_softc *sc)
   13896 {
   13897 	uint16_t data;
   13898 
   13899 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13900 
   13901 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13902 		return 1;
   13903 
   13904 	return 0;
   13905 }
   13906 
   13907 static int
   13908 wm_check_mng_mode_generic(struct wm_softc *sc)
   13909 {
   13910 	uint32_t fwsm;
   13911 
   13912 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13913 
   13914 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13915 		return 1;
   13916 
   13917 	return 0;
   13918 }
   13919 #endif /* WM_WOL */
   13920 
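          /*
           * Return 1 if management pass-through should be enabled, i.e. the
           * management controller receives TCO packets through this port;
           * the decision is based on MANC, FWSM/FACTPS or NVM CFG2 depending
           * on the chip family.
           */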
   13921 static int
   13922 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13923 {
   13924 	uint32_t manc, fwsm, factps;
   13925 
   13926 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13927 		return 0;
   13928 
   13929 	manc = CSR_READ(sc, WMREG_MANC);
   13930 
   13931 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13932 		device_xname(sc->sc_dev), manc));
   13933 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13934 		return 0;
   13935 
   13936 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13937 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13938 		factps = CSR_READ(sc, WMREG_FACTPS);
   13939 		if (((factps & FACTPS_MNGCG) == 0)
   13940 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13941 			return 1;
   13942 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13943 		uint16_t data;
   13944 
   13945 		factps = CSR_READ(sc, WMREG_FACTPS);
   13946 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13947 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13948 			device_xname(sc->sc_dev), factps, data));
   13949 		if (((factps & FACTPS_MNGCG) == 0)
   13950 		    && ((data & NVM_CFG2_MNGM_MASK)
   13951 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13952 			return 1;
   13953 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13954 	    && ((manc & MANC_ASF_EN) == 0))
   13955 		return 1;
   13956 
   13957 	return 0;
   13958 }
   13959 
   13960 static bool
   13961 wm_phy_resetisblocked(struct wm_softc *sc)
   13962 {
   13963 	bool blocked = false;
   13964 	uint32_t reg;
   13965 	int i = 0;
   13966 
   13967 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 
   13970 	switch (sc->sc_type) {
   13971 	case WM_T_ICH8:
   13972 	case WM_T_ICH9:
   13973 	case WM_T_ICH10:
   13974 	case WM_T_PCH:
   13975 	case WM_T_PCH2:
   13976 	case WM_T_PCH_LPT:
   13977 	case WM_T_PCH_SPT:
   13978 	case WM_T_PCH_CNP:
   13979 		do {
   13980 			reg = CSR_READ(sc, WMREG_FWSM);
   13981 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13982 				blocked = true;
   13983 				delay(10*1000);
   13984 				continue;
   13985 			}
   13986 			blocked = false;
   13987 		} while (blocked && (i++ < 30));
   13988 		return blocked;
   13989 		break;
   13990 	case WM_T_82571:
   13991 	case WM_T_82572:
   13992 	case WM_T_82573:
   13993 	case WM_T_82574:
   13994 	case WM_T_82583:
   13995 	case WM_T_80003:
   13996 		reg = CSR_READ(sc, WMREG_MANC);
   13997 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13998 			return true;
   13999 		else
   14000 			return false;
   14001 		break;
   14002 	default:
   14003 		/* no problem */
   14004 		break;
   14005 	}
   14006 
   14007 	return false;
   14008 }
   14009 
   14010 static void
   14011 wm_get_hw_control(struct wm_softc *sc)
   14012 {
   14013 	uint32_t reg;
   14014 
   14015 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14016 		device_xname(sc->sc_dev), __func__));
   14017 
   14018 	if (sc->sc_type == WM_T_82573) {
   14019 		reg = CSR_READ(sc, WMREG_SWSM);
   14020 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14021 	} else if (sc->sc_type >= WM_T_82571) {
   14022 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14023 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14024 	}
   14025 }
   14026 
   14027 static void
   14028 wm_release_hw_control(struct wm_softc *sc)
   14029 {
   14030 	uint32_t reg;
   14031 
   14032 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14033 		device_xname(sc->sc_dev), __func__));
   14034 
   14035 	if (sc->sc_type == WM_T_82573) {
   14036 		reg = CSR_READ(sc, WMREG_SWSM);
   14037 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14038 	} else if (sc->sc_type >= WM_T_82571) {
   14039 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14040 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14041 	}
   14042 }
   14043 
   14044 static void
   14045 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14046 {
   14047 	uint32_t reg;
   14048 
   14049 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14050 		device_xname(sc->sc_dev), __func__));
   14051 
   14052 	if (sc->sc_type < WM_T_PCH2)
   14053 		return;
   14054 
   14055 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14056 
   14057 	if (gate)
   14058 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14059 	else
   14060 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14061 
   14062 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14063 }
   14064 
   14065 static int
   14066 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14067 {
   14068 	uint32_t fwsm, reg;
   14069 	int rv = 0;
   14070 
   14071 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14072 		device_xname(sc->sc_dev), __func__));
   14073 
   14074 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14075 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14076 
   14077 	/* Disable ULP */
   14078 	wm_ulp_disable(sc);
   14079 
   14080 	/* Acquire PHY semaphore */
   14081 	rv = sc->phy.acquire(sc);
   14082 	if (rv != 0) {
   14083 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14084 		device_xname(sc->sc_dev), __func__));
   14085 		return -1;
   14086 	}
   14087 
   14088 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14089 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14090 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14091 	 */
   14092 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14093 	switch (sc->sc_type) {
   14094 	case WM_T_PCH_LPT:
   14095 	case WM_T_PCH_SPT:
   14096 	case WM_T_PCH_CNP:
   14097 		if (wm_phy_is_accessible_pchlan(sc))
   14098 			break;
   14099 
   14100 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14101 		 * forcing MAC to SMBus mode first.
   14102 		 */
   14103 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14104 		reg |= CTRL_EXT_FORCE_SMBUS;
   14105 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14106 #if 0
   14107 		/* XXX Isn't this required??? */
   14108 		CSR_WRITE_FLUSH(sc);
   14109 #endif
   14110 		/* Wait 50 milliseconds for MAC to finish any retries
   14111 		 * that it might be trying to perform from previous
   14112 		 * attempts to acknowledge any phy read requests.
   14113 		 */
   14114 		delay(50 * 1000);
   14115 		/* FALLTHROUGH */
   14116 	case WM_T_PCH2:
   14117 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14118 			break;
   14119 		/* FALLTHROUGH */
   14120 	case WM_T_PCH:
   14121 		if (sc->sc_type == WM_T_PCH)
   14122 			if ((fwsm & FWSM_FW_VALID) != 0)
   14123 				break;
   14124 
   14125 		if (wm_phy_resetisblocked(sc) == true) {
   14126 			printf("XXX reset is blocked(3)\n");
   14127 			break;
   14128 		}
   14129 
   14130 		/* Toggle LANPHYPC Value bit */
   14131 		wm_toggle_lanphypc_pch_lpt(sc);
   14132 
   14133 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14134 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14135 				break;
   14136 
    14137 			/* Toggling LANPHYPC brings the PHY out of SMBus mode,
    14138 			 * so ensure that the MAC is also out of SMBus mode.
   14139 			 */
   14140 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14141 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14142 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14143 
   14144 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14145 				break;
   14146 			rv = -1;
   14147 		}
   14148 		break;
   14149 	default:
   14150 		break;
   14151 	}
   14152 
   14153 	/* Release semaphore */
   14154 	sc->phy.release(sc);
   14155 
   14156 	if (rv == 0) {
   14157 		/* Check to see if able to reset PHY.  Print error if not */
   14158 		if (wm_phy_resetisblocked(sc)) {
   14159 			printf("XXX reset is blocked(4)\n");
   14160 			goto out;
   14161 		}
   14162 
   14163 		/* Reset the PHY before any access to it.  Doing so, ensures
   14164 		 * that the PHY is in a known good state before we read/write
   14165 		 * PHY registers.  The generic reset is sufficient here,
   14166 		 * because we haven't determined the PHY type yet.
   14167 		 */
   14168 		if (wm_reset_phy(sc) != 0)
   14169 			goto out;
   14170 
   14171 		/* On a successful reset, possibly need to wait for the PHY
   14172 		 * to quiesce to an accessible state before returning control
    14173 		 * to the calling function.  If the PHY does not quiesce, we
    14174 		 * only report it here; Linux returns E1000E_BLK_PHY_RESET,
    14175 		 * as that is the condition the PHY is in.
   14176 		 */
   14177 		if (wm_phy_resetisblocked(sc))
   14178 			printf("XXX reset is blocked(4)\n");
   14179 	}
   14180 
   14181 out:
   14182 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14183 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14184 		delay(10*1000);
   14185 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14186 	}
   14187 
    14188 	return rv;
   14189 }
   14190 
   14191 static void
   14192 wm_init_manageability(struct wm_softc *sc)
   14193 {
   14194 
   14195 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14196 		device_xname(sc->sc_dev), __func__));
   14197 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14198 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14199 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14200 
   14201 		/* Disable hardware interception of ARP */
   14202 		manc &= ~MANC_ARP_EN;
   14203 
   14204 		/* Enable receiving management packets to the host */
   14205 		if (sc->sc_type >= WM_T_82571) {
   14206 			manc |= MANC_EN_MNG2HOST;
   14207 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14208 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14209 		}
   14210 
   14211 		CSR_WRITE(sc, WMREG_MANC, manc);
   14212 	}
   14213 }
   14214 
   14215 static void
   14216 wm_release_manageability(struct wm_softc *sc)
   14217 {
   14218 
   14219 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14220 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14221 
   14222 		manc |= MANC_ARP_EN;
   14223 		if (sc->sc_type >= WM_T_82571)
   14224 			manc &= ~MANC_EN_MNG2HOST;
   14225 
   14226 		CSR_WRITE(sc, WMREG_MANC, manc);
   14227 	}
   14228 }
   14229 
   14230 static void
   14231 wm_get_wakeup(struct wm_softc *sc)
   14232 {
   14233 
   14234 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14235 	switch (sc->sc_type) {
   14236 	case WM_T_82573:
   14237 	case WM_T_82583:
   14238 		sc->sc_flags |= WM_F_HAS_AMT;
   14239 		/* FALLTHROUGH */
   14240 	case WM_T_80003:
   14241 	case WM_T_82575:
   14242 	case WM_T_82576:
   14243 	case WM_T_82580:
   14244 	case WM_T_I350:
   14245 	case WM_T_I354:
   14246 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14247 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14248 		/* FALLTHROUGH */
   14249 	case WM_T_82541:
   14250 	case WM_T_82541_2:
   14251 	case WM_T_82547:
   14252 	case WM_T_82547_2:
   14253 	case WM_T_82571:
   14254 	case WM_T_82572:
   14255 	case WM_T_82574:
   14256 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14257 		break;
   14258 	case WM_T_ICH8:
   14259 	case WM_T_ICH9:
   14260 	case WM_T_ICH10:
   14261 	case WM_T_PCH:
   14262 	case WM_T_PCH2:
   14263 	case WM_T_PCH_LPT:
   14264 	case WM_T_PCH_SPT:
   14265 	case WM_T_PCH_CNP:
   14266 		sc->sc_flags |= WM_F_HAS_AMT;
   14267 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14268 		break;
   14269 	default:
   14270 		break;
   14271 	}
   14272 
   14273 	/* 1: HAS_MANAGE */
   14274 	if (wm_enable_mng_pass_thru(sc) != 0)
   14275 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14276 
   14277 	/*
    14278 	 * Note that the WM_F_WOL flag is set after the EEPROM/NVM
    14279 	 * initialization (reset and read) has been done.
   14280 	 */
   14281 }
   14282 
   14283 /*
   14284  * Unconfigure Ultra Low Power mode.
   14285  * Only for I217 and newer (see below).
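           * When valid ME firmware is present the ME is asked to undo ULP
           * via the H2ME register; otherwise the configuration is torn down
           * by hand through CV_SMB_CTRL and I218_ULP_CONFIG1.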
   14286  */
   14287 static int
   14288 wm_ulp_disable(struct wm_softc *sc)
   14289 {
   14290 	uint32_t reg;
   14291 	uint16_t phyreg;
   14292 	int i = 0, rv = 0;
   14293 
   14294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14295 		device_xname(sc->sc_dev), __func__));
   14296 	/* Exclude old devices */
   14297 	if ((sc->sc_type < WM_T_PCH_LPT)
   14298 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14299 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14300 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14301 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14302 		return 0;
   14303 
   14304 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14305 		/* Request ME un-configure ULP mode in the PHY */
   14306 		reg = CSR_READ(sc, WMREG_H2ME);
   14307 		reg &= ~H2ME_ULP;
   14308 		reg |= H2ME_ENFORCE_SETTINGS;
   14309 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14310 
   14311 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14312 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14313 			if (i++ == 30) {
   14314 				printf("%s timed out\n", __func__);
   14315 				return -1;
   14316 			}
   14317 			delay(10 * 1000);
   14318 		}
   14319 		reg = CSR_READ(sc, WMREG_H2ME);
   14320 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14321 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14322 
   14323 		return 0;
   14324 	}
   14325 
   14326 	/* Acquire semaphore */
   14327 	rv = sc->phy.acquire(sc);
   14328 	if (rv != 0) {
   14329 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14330 		device_xname(sc->sc_dev), __func__));
   14331 		return -1;
   14332 	}
   14333 
   14334 	/* Toggle LANPHYPC */
   14335 	wm_toggle_lanphypc_pch_lpt(sc);
   14336 
   14337 	/* Unforce SMBus mode in PHY */
   14338 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14339 	if (rv != 0) {
   14340 		uint32_t reg2;
   14341 
   14342 		printf("%s: Force SMBus first.\n", __func__);
   14343 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14344 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14345 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14346 		delay(50 * 1000);
   14347 
   14348 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14349 		    &phyreg);
   14350 		if (rv != 0)
   14351 			goto release;
   14352 	}
   14353 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14354 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14355 
   14356 	/* Unforce SMBus mode in MAC */
   14357 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14358 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14359 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14360 
   14361 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14362 	if (rv != 0)
   14363 		goto release;
   14364 	phyreg |= HV_PM_CTRL_K1_ENA;
   14365 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14366 
   14367 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14368 		&phyreg);
   14369 	if (rv != 0)
   14370 		goto release;
   14371 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14372 	    | I218_ULP_CONFIG1_STICKY_ULP
   14373 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14374 	    | I218_ULP_CONFIG1_WOL_HOST
   14375 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14376 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14377 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14378 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14379 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14380 	phyreg |= I218_ULP_CONFIG1_START;
   14381 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14382 
   14383 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14384 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14385 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14386 
   14387 release:
   14388 	/* Release semaphore */
   14389 	sc->phy.release(sc);
   14390 	wm_gmii_reset(sc);
   14391 	delay(50 * 1000);
   14392 
   14393 	return rv;
   14394 }
   14395 
   14396 /* WOL in the newer chipset interfaces (pchlan) */
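          /*
           * On PCH and newer parts it is the PHY, not the MAC, that stays
           * awake, so the MAC's receive-address, multicast-table and Rx
           * control state is mirrored into the PHY's wakeup register page
           * (BM_WUC_PAGE) below.
           */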
   14397 static int
   14398 wm_enable_phy_wakeup(struct wm_softc *sc)
   14399 {
   14400 	device_t dev = sc->sc_dev;
   14401 	uint32_t mreg, moff;
   14402 	uint16_t wuce, wuc, wufc, preg;
   14403 	int i, rv;
   14404 
   14405 	KASSERT(sc->sc_type >= WM_T_PCH);
   14406 
   14407 	/* Copy MAC RARs to PHY RARs */
   14408 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14409 
   14410 	/* Activate PHY wakeup */
   14411 	rv = sc->phy.acquire(sc);
   14412 	if (rv != 0) {
   14413 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14414 		    __func__);
   14415 		return rv;
   14416 	}
   14417 
   14418 	/*
   14419 	 * Enable access to PHY wakeup registers.
   14420 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14421 	 */
   14422 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14423 	if (rv != 0) {
   14424 		device_printf(dev,
   14425 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14426 		goto release;
   14427 	}
   14428 
   14429 	/* Copy MAC MTA to PHY MTA */
   14430 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14431 		uint16_t lo, hi;
   14432 
   14433 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14434 		lo = (uint16_t)(mreg & 0xffff);
   14435 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14436 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14437 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14438 	}
   14439 
   14440 	/* Configure PHY Rx Control register */
   14441 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14442 	mreg = CSR_READ(sc, WMREG_RCTL);
   14443 	if (mreg & RCTL_UPE)
   14444 		preg |= BM_RCTL_UPE;
   14445 	if (mreg & RCTL_MPE)
   14446 		preg |= BM_RCTL_MPE;
   14447 	preg &= ~(BM_RCTL_MO_MASK);
   14448 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14449 	if (moff != 0)
   14450 		preg |= moff << BM_RCTL_MO_SHIFT;
   14451 	if (mreg & RCTL_BAM)
   14452 		preg |= BM_RCTL_BAM;
   14453 	if (mreg & RCTL_PMCF)
   14454 		preg |= BM_RCTL_PMCF;
   14455 	mreg = CSR_READ(sc, WMREG_CTRL);
   14456 	if (mreg & CTRL_RFCE)
   14457 		preg |= BM_RCTL_RFCE;
   14458 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14459 
   14460 	wuc = WUC_APME | WUC_PME_EN;
   14461 	wufc = WUFC_MAG;
   14462 	/* Enable PHY wakeup in MAC register */
   14463 	CSR_WRITE(sc, WMREG_WUC,
   14464 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14465 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14466 
   14467 	/* Configure and enable PHY wakeup in PHY registers */
   14468 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14469 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14470 
   14471 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14472 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14473 
   14474 release:
   14475 	sc->phy.release(sc);
   14476 
    14477 	return rv;
   14478 }
   14479 
   14480 /* Power down workaround on D3 */
   14481 static void
   14482 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14483 {
   14484 	uint32_t reg;
   14485 	int i;
   14486 
   14487 	for (i = 0; i < 2; i++) {
   14488 		/* Disable link */
   14489 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14490 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14491 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14492 
   14493 		/*
   14494 		 * Call gig speed drop workaround on Gig disable before
   14495 		 * accessing any PHY registers
   14496 		 */
   14497 		if (sc->sc_type == WM_T_ICH8)
   14498 			wm_gig_downshift_workaround_ich8lan(sc);
   14499 
   14500 		/* Write VR power-down enable */
   14501 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14502 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14503 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14504 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14505 
   14506 		/* Read it back and test */
   14507 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14508 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14509 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14510 			break;
   14511 
   14512 		/* Issue PHY reset and repeat at most one more time */
   14513 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14514 	}
   14515 }
   14516 
   14517 /*
   14518  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14519  *  @sc: pointer to the HW structure
   14520  *
   14521  *  During S0 to Sx transition, it is possible the link remains at gig
   14522  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14523  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14524  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14525  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14526  *  needs to be written.
   14527  *  Parts that support (and are linked to a partner which support) EEE in
   14528  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14529  *  than 10Mbps w/o EEE.
   14530  */
   14531 static void
   14532 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14533 {
   14534 	uint32_t phy_ctrl;
   14535 
   14536 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14537 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14538 
   14539 	if (sc->sc_phytype == WMPHY_I217) {
   14540 		uint16_t devid = sc->sc_pcidevid;
   14541 
   14542 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14543 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14544 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14545 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14546 		    (sc->sc_type >= WM_T_PCH_SPT))
   14547 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14548 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14549 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14550 
   14551 #if 0 /* notyet */
   14552 		if (sc->phy.acquire(sc) != 0)
   14553 			goto out;
   14554 
   14555 		/* XXX Do workaround for EEE */
   14556 
   14557 		/*
   14558 		 * For i217 Intel Rapid Start Technology support,
   14559 		 * when the system is going into Sx and no manageability engine
   14560 		 * is present, the driver must configure proxy to reset only on
   14561 		 * power good.	LPI (Low Power Idle) state must also reset only
   14562 		 * on power good, as well as the MTA (Multicast table array).
   14563 		 * The SMBus release must also be disabled on LCD reset.
   14564 		 */
   14565 
   14566 		/*
   14567 		 * Enable MTA to reset for Intel Rapid Start Technology
   14568 		 * Support
   14569 		 */
   14570 
   14571 		sc->phy.release(sc);
   14572 #endif
   14573 	}
   14574 #if 0
   14575 out:
   14576 #endif
   14577 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14578 
   14579 	if (sc->sc_type == WM_T_ICH8)
   14580 		wm_gig_downshift_workaround_ich8lan(sc);
   14581 
   14582 	if (sc->sc_type >= WM_T_PCH) {
   14583 		wm_oem_bits_config_ich8lan(sc, false);
   14584 
   14585 		/* Reset PHY to activate OEM bits on 82577/8 */
   14586 		if (sc->sc_type == WM_T_PCH)
   14587 			wm_reset_phy(sc);
   14588 
   14589 		if (sc->phy.acquire(sc) != 0)
   14590 			return;
   14591 		wm_write_smbus_addr(sc);
   14592 		sc->phy.release(sc);
   14593 	}
   14594 }
   14595 
   14596 /*
   14597  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14598  *  @sc: pointer to the HW structure
   14599  *
   14600  *  During Sx to S0 transitions on non-managed devices or managed devices
   14601  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14602  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14603  *  the PHY.
   14604  *  On i217, setup Intel Rapid Start Technology.
   14605  */
   14606 static int
   14607 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14608 {
   14609 	device_t dev = sc->sc_dev;
   14610 	int rv;
   14611 
   14612 	if (sc->sc_type < WM_T_PCH2)
   14613 		return 0;
   14614 
   14615 	rv = wm_init_phy_workarounds_pchlan(sc);
   14616 	if (rv != 0)
   14617 		return -1;
   14618 
    14619 	/* For i217 Intel Rapid Start Technology support: when the system
    14620 	 * is transitioning from Sx and no manageability engine is present,
   14621 	 * configure SMBus to restore on reset, disable proxy, and enable
   14622 	 * the reset on MTA (Multicast table array).
   14623 	 */
   14624 	if (sc->sc_phytype == WMPHY_I217) {
   14625 		uint16_t phy_reg;
   14626 
   14627 		if (sc->phy.acquire(sc) != 0)
   14628 			return -1;
   14629 
   14630 		/* Clear Auto Enable LPI after link up */
   14631 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14632 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14633 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14634 
   14635 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14636 			/* Restore clear on SMB if no manageability engine
   14637 			 * is present
   14638 			 */
			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
			    &phy_reg);
			if (rv != 0)
				goto release;
   14642 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14643 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14644 
   14645 			/* Disable Proxy */
   14646 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14647 		}
   14648 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14650 		if (rv != 0)
   14651 			goto release;
   14652 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14653 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14654 
   14655 release:
   14656 		sc->phy.release(sc);
   14657 		return rv;
   14658 	}
   14659 
   14660 	return 0;
   14661 }
   14662 
   14663 static void
   14664 wm_enable_wakeup(struct wm_softc *sc)
   14665 {
   14666 	uint32_t reg, pmreg;
   14667 	pcireg_t pmode;
   14668 	int rv = 0;
   14669 
   14670 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14671 		device_xname(sc->sc_dev), __func__));
   14672 
   14673 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14674 	    &pmreg, NULL) == 0)
   14675 		return;
   14676 
   14677 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14678 		goto pme;
   14679 
   14680 	/* Advertise the wakeup capability */
   14681 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14682 	    | CTRL_SWDPIN(3));
   14683 
   14684 	/* Keep the laser running on fiber adapters */
   14685 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14686 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14687 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14688 		reg |= CTRL_EXT_SWDPIN(3);
   14689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14690 	}
   14691 
   14692 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14693 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   14694 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   14695 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   14696 		wm_suspend_workarounds_ich8lan(sc);
   14697 
   14698 #if 0	/* for the multicast packet */
   14699 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14700 	reg |= WUFC_MC;
   14701 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14702 #endif
   14703 
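	/*
	 * On PCH and newer parts the wake events are detected by the PHY,
	 * so the configuration has to be pushed into the PHY's wakeup
	 * register space; older MACs latch wake events themselves through
	 * WUC/WUFC.
	 */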
   14704 	if (sc->sc_type >= WM_T_PCH) {
   14705 		rv = wm_enable_phy_wakeup(sc);
   14706 		if (rv != 0)
   14707 			goto pme;
   14708 	} else {
   14709 		/* Enable wakeup by the MAC */
   14710 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   14711 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   14712 	}
   14713 
   14714 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14715 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14716 		|| (sc->sc_type == WM_T_PCH2))
   14717 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14718 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14719 
   14720 pme:
   14721 	/* Request PME */
   14722 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14723 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   14724 		/* For WOL */
   14725 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14726 	} else {
   14727 		/* Disable WOL */
   14728 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14729 	}
   14730 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14731 }
   14732 
   14733 /* Disable ASPM L0s and/or L1 for workaround */
   14734 static void
   14735 wm_disable_aspm(struct wm_softc *sc)
   14736 {
   14737 	pcireg_t reg, mask = 0;
	const char *str = "";
   14739 
	/*
	 * Only for PCIe devices which have the PCIe capability in their PCI
	 * config space.
	 */
   14744 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14745 		return;
   14746 
   14747 	switch (sc->sc_type) {
   14748 	case WM_T_82571:
   14749 	case WM_T_82572:
   14750 		/*
   14751 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14752 		 * State Power management L1 State (ASPM L1).
   14753 		 */
   14754 		mask = PCIE_LCSR_ASPM_L1;
   14755 		str = "L1 is";
   14756 		break;
   14757 	case WM_T_82573:
   14758 	case WM_T_82574:
   14759 	case WM_T_82583:
   14760 		/*
   14761 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14762 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s on those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   14767 		 *
   14768 		 * References:
   14769 		 * Errata 8 of the Specification Update of i82573.
   14770 		 * Errata 20 of the Specification Update of i82574.
   14771 		 * Errata 9 of the Specification Update of i82583.
   14772 		 */
   14773 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14774 		str = "L0s and L1 are";
   14775 		break;
   14776 	default:
   14777 		return;
   14778 	}
   14779 
   14780 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14781 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14782 	reg &= ~mask;
   14783 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14784 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14785 
   14786 	/* Print only in wm_attach() */
   14787 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14788 		aprint_verbose_dev(sc->sc_dev,
   14789 		    "ASPM %s disabled to workaround the errata.\n", str);
   14790 }
   14791 
   14792 /* LPLU */
   14793 
   14794 static void
   14795 wm_lplu_d0_disable(struct wm_softc *sc)
   14796 {
   14797 	struct mii_data *mii = &sc->sc_mii;
   14798 	uint32_t reg;
   14799 
   14800 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14801 		device_xname(sc->sc_dev), __func__));
   14802 
   14803 	if (sc->sc_phytype == WMPHY_IFE)
   14804 		return;
   14805 
   14806 	switch (sc->sc_type) {
   14807 	case WM_T_82571:
   14808 	case WM_T_82572:
   14809 	case WM_T_82573:
   14810 	case WM_T_82575:
   14811 	case WM_T_82576:
   14812 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14813 		reg &= ~PMR_D0_LPLU;
   14814 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14815 		break;
   14816 	case WM_T_82580:
   14817 	case WM_T_I350:
   14818 	case WM_T_I210:
   14819 	case WM_T_I211:
   14820 		reg = CSR_READ(sc, WMREG_PHPM);
   14821 		reg &= ~PHPM_D0A_LPLU;
   14822 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14823 		break;
   14824 	case WM_T_82574:
   14825 	case WM_T_82583:
   14826 	case WM_T_ICH8:
   14827 	case WM_T_ICH9:
   14828 	case WM_T_ICH10:
   14829 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14830 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14831 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14832 		CSR_WRITE_FLUSH(sc);
   14833 		break;
   14834 	case WM_T_PCH:
   14835 	case WM_T_PCH2:
   14836 	case WM_T_PCH_LPT:
   14837 	case WM_T_PCH_SPT:
   14838 	case WM_T_PCH_CNP:
   14839 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14840 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14841 		if (wm_phy_resetisblocked(sc) == false)
   14842 			reg |= HV_OEM_BITS_ANEGNOW;
   14843 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14844 		break;
   14845 	default:
   14846 		break;
   14847 	}
   14848 }
   14849 
   14850 /* EEE */
   14851 
   14852 static void
   14853 wm_set_eee_i350(struct wm_softc *sc)
   14854 {
   14855 	uint32_t ipcnfg, eeer;
   14856 
   14857 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14858 	eeer = CSR_READ(sc, WMREG_EEER);
   14859 
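	/*
	 * IPCNFG selects the speeds at which EEE may be auto-negotiated;
	 * EEER enables LPI entry on the Tx/Rx paths and LPI flow control.
	 */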
   14860 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14861 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14862 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14863 		    | EEER_LPI_FC);
   14864 	} else {
   14865 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14866 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14867 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14868 		    | EEER_LPI_FC);
   14869 	}
   14870 
   14871 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14872 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14873 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14874 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14875 }
   14876 
   14877 /*
   14878  * Workarounds (mainly PHY related).
   14879  * Basically, PHY's workarounds are in the PHY drivers.
   14880  */
   14881 
   14882 /* Work-around for 82566 Kumeran PCS lock loss */
   14883 static void
   14884 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14885 {
   14886 	struct mii_data *mii = &sc->sc_mii;
   14887 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14888 	int i;
   14889 	int reg;
   14890 
   14891 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14892 		device_xname(sc->sc_dev), __func__));
   14893 
   14894 	/* If the link is not up, do nothing */
   14895 	if ((status & STATUS_LU) == 0)
   14896 		return;
   14897 
	/* Nothing to do if the link speed is other than 1Gbps */
	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
		return;

   14903 	for (i = 0; i < 10; i++) {
		/* Read twice: the first read flushes stale latched data */
   14905 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14906 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14907 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14908 			goto out;	/* GOOD! */
   14909 
   14910 		/* Reset the PHY */
   14911 		wm_reset_phy(sc);
   14912 		delay(5*1000);
   14913 	}
   14914 
   14915 	/* Disable GigE link negotiation */
   14916 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14917 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14918 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14919 
   14920 	/*
   14921 	 * Call gig speed drop workaround on Gig disable before accessing
   14922 	 * any PHY registers.
   14923 	 */
   14924 	wm_gig_downshift_workaround_ich8lan(sc);
   14925 
   14926 out:
   14927 	return;
   14928 }
   14929 
   14930 /*
   14931  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   14932  *  @sc: pointer to the HW structure
   14933  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   14935  *  LPLU, Gig disable, MDIC PHY reset):
   14936  *    1) Set Kumeran Near-end loopback
   14937  *    2) Clear Kumeran Near-end loopback
   14938  *  Should only be called for ICH8[m] devices with any 1G Phy.
   14939  */
   14940 static void
   14941 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14942 {
   14943 	uint16_t kmreg;
   14944 
   14945 	/* Only for igp3 */
   14946 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14947 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14948 			return;
   14949 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14950 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14951 			return;
   14952 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14953 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14954 	}
   14955 }
   14956 
   14957 /*
   14958  * Workaround for pch's PHYs
   14959  * XXX should be moved to new PHY driver?
   14960  */
   14961 static void
   14962 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   14963 {
   14964 
   14965 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14966 		device_xname(sc->sc_dev), __func__));
   14967 	KASSERT(sc->sc_type == WM_T_PCH);
   14968 
   14969 	if (sc->sc_phytype == WMPHY_82577)
   14970 		wm_set_mdio_slow_mode_hv(sc);
   14971 
   14972 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14973 
   14974 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14975 
   14976 	/* 82578 */
   14977 	if (sc->sc_phytype == WMPHY_82578) {
   14978 		struct mii_softc *child;
   14979 
   14980 		/*
   14981 		 * Return registers to default by doing a soft reset then
   14982 		 * writing 0x3140 to the control register
   14983 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14984 		 */
   14985 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14986 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14987 			PHY_RESET(child);
   14988 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14989 			    0x3140);
   14990 		}
   14991 	}
   14992 
	/* Select page 0 */
	if (sc->phy.acquire(sc) != 0)
		return;
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);
   14997 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there is
	 * link, so that it disables K1 when the link is at 1Gbps.
	 */
   15002 	wm_k1_gig_workaround_hv(sc, 1);
   15003 }
   15004 
   15005 /*
   15006  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15007  *  @sc:   pointer to the HW structure
   15008  */
   15009 static void
   15010 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15011 {
   15012 	device_t dev = sc->sc_dev;
   15013 	uint32_t mac_reg;
   15014 	uint16_t i, wuce;
   15015 	int count;
   15016 
   15017 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15018 		device_xname(sc->sc_dev), __func__));
   15019 
   15020 	if (sc->phy.acquire(sc) != 0)
   15021 		return;
   15022 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15023 		goto release;
   15024 
   15025 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15026 	count = wm_rar_count(sc);
   15027 	for (i = 0; i < count; i++) {
   15028 		uint16_t lo, hi;
   15029 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15030 		lo = (uint16_t)(mac_reg & 0xffff);
   15031 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15032 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15033 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15034 
   15035 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15036 		lo = (uint16_t)(mac_reg & 0xffff);
   15037 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15038 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15039 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15040 	}
   15041 
   15042 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15043 
   15044 release:
   15045 	sc->phy.release(sc);
   15046 }
   15047 
   15048 /*
   15049  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15050  *  done after every PHY reset.
   15051  */
   15052 static void
   15053 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15054 {
   15055 
   15056 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15057 		device_xname(sc->sc_dev), __func__));
   15058 	KASSERT(sc->sc_type == WM_T_PCH2);
   15059 
   15060 	/* Set MDIO slow mode before any other MDIO access */
   15061 	wm_set_mdio_slow_mode_hv(sc);
   15062 
   15063 	/* XXX set MSE higher to enable link to stay up when noise is high */
   15064 	/* XXX drop link after 5 times MSE threshold was reached */
   15065 }
   15066 
   15067 /**
   15068  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
   15070  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15074  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15075  *  speeds in order to avoid Tx hangs.
   15076  **/
   15077 static int
   15078 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15079 {
   15080 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15081 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15082 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15083 	uint16_t phyreg;
   15084 
   15085 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15089 		if (rv != 0)
   15090 			goto release;
   15091 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15092 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15093 		if (rv != 0)
   15094 			goto release;
   15095 		delay(20);
   15096 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15097 
   15098 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15099 		    &phyreg);
   15100 release:
   15101 		sc->phy.release(sc);
   15102 		return rv;
   15103 	}
   15104 
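	/*
	 * Not at 1Gbps (or no link): stop requesting the PLL clock and, for
	 * 10Mbps and 100Half, tune the inband Tx retransmission timeout as
	 * described above.
	 */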
   15105 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15106 
   15107 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15108 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15109 	    || !link
   15110 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15111 		goto update_fextnvm6;
   15112 
   15113 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   15114 
   15115 	/* Clear link status transmit timeout */
   15116 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15117 	if (speed == STATUS_SPEED_100) {
   15118 		/* Set inband Tx timeout to 5x10us for 100Half */
   15119 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15120 
   15121 		/* Do not extend the K1 entry latency for 100Half */
   15122 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15123 	} else {
   15124 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15125 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15126 
   15127 		/* Extend the K1 entry latency for 10 Mbps */
   15128 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15129 	}
   15130 
   15131 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15132 
   15133 update_fextnvm6:
   15134 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15135 	return 0;
   15136 }
   15137 
   15138 /*
   15139  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15140  *  @sc:   pointer to the HW structure
   15141  *  @link: link up bool flag
   15142  *
   15143  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15145  *  If link is down, the function will restore the default K1 setting located
   15146  *  in the NVM.
   15147  */
   15148 static int
   15149 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15150 {
   15151 	int k1_enable = sc->sc_nvm_k1_enabled;
   15152 
   15153 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15154 		device_xname(sc->sc_dev), __func__));
   15155 
   15156 	if (sc->phy.acquire(sc) != 0)
   15157 		return -1;
   15158 
   15159 	if (link) {
   15160 		k1_enable = 0;
   15161 
   15162 		/* Link stall fix for link up */
   15163 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15164 		    0x0100);
   15165 	} else {
   15166 		/* Link stall fix for link down */
   15167 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15168 		    0x4100);
   15169 	}
   15170 
   15171 	wm_configure_k1_ich8lan(sc, k1_enable);
   15172 	sc->phy.release(sc);
   15173 
   15174 	return 0;
   15175 }
   15176 
   15177 /*
   15178  *  wm_k1_workaround_lv - K1 Si workaround
   15179  *  @sc:   pointer to the HW structure
   15180  *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
   15183  */
   15184 static int
   15185 wm_k1_workaround_lv(struct wm_softc *sc)
   15186 {
   15187 	uint32_t reg;
   15188 	int phyreg;
   15189 
   15190 	if (sc->sc_type != WM_T_PCH2)
   15191 		return 0;
   15192 
   15193 	/* Set K1 beacon duration based on 10Mbps speed */
   15194 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS);
   15195 
   15196 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15197 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15198 		if (phyreg &
   15199 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   15201 			phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL);
   15202 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15203 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg);
   15204 		} else {
   15205 			/* For 10Mbps */
   15206 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15207 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15208 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15209 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15210 		}
   15211 	}
   15212 
   15213 	return 0;
   15214 }
   15215 
   15216 /*
   15217  *  wm_link_stall_workaround_hv - Si workaround
   15218  *  @sc: pointer to the HW structure
   15219  *
   15220  *  This function works around a Si bug where the link partner can get
   15221  *  a link up indication before the PHY does. If small packets are sent
   15222  *  by the link partner they can be placed in the packet buffer without
   15223  *  being properly accounted for by the PHY and will stall preventing
   15224  *  further packets from being received.  The workaround is to clear the
   15225  *  packet buffer after the PHY detects link up.
   15226  */
   15227 static int
   15228 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15229 {
   15230 	int phyreg;
   15231 
   15232 	if (sc->sc_phytype != WMPHY_82578)
   15233 		return 0;
   15234 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR);
   15237 	if ((phyreg & BMCR_LOOP) != 0)
   15238 		return 0;
   15239 
	/* Check if the link is up and at 1Gbps */
   15241 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS);
   15242 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15243 	    | BM_CS_STATUS_SPEED_MASK;
   15244 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15245 		| BM_CS_STATUS_SPEED_1000))
   15246 		return 0;
   15247 
   15248 	delay(200 * 1000);	/* XXX too big */
   15249 
   15250 	/* flush the packets in the fifo buffer */
   15251 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15252 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15253 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15254 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15255 
   15256 	return 0;
   15257 }
   15258 
   15259 static void
   15260 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15261 {
   15262 	uint32_t reg;
   15263 
   15264 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   15265 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15266 	    reg | HV_KMRN_MDIO_SLOW);
   15267 }
   15268 
   15269 /*
   15270  *  wm_configure_k1_ich8lan - Configure K1 power state
   15271  *  @sc: pointer to the HW structure
 *  @k1_enable: K1 state to configure
   15273  *
   15274  *  Configure the K1 power state based on the provided parameter.
   15275  *  Assumes semaphore already acquired.
   15276  */
   15277 static void
   15278 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15279 {
   15280 	uint32_t ctrl, ctrl_ext, tmp;
   15281 	uint16_t kmreg;
   15282 	int rv;
   15283 
   15284 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15285 
   15286 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15287 	if (rv != 0)
   15288 		return;
   15289 
   15290 	if (k1_enable)
   15291 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15292 	else
   15293 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15294 
   15295 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15296 	if (rv != 0)
   15297 		return;
   15298 
   15299 	delay(20);
   15300 
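	/*
	 * Briefly force the MAC speed configuration (speed bits cleared,
	 * force-speed and speed-bypass set) so that the K1 change takes
	 * effect, then restore the original CTRL/CTRL_EXT values.
	 */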
   15301 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15302 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15303 
   15304 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15305 	tmp |= CTRL_FRCSPD;
   15306 
   15307 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15308 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15309 	CSR_WRITE_FLUSH(sc);
   15310 	delay(20);
   15311 
   15312 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15313 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15314 	CSR_WRITE_FLUSH(sc);
   15315 	delay(20);
   15318 }
   15319 
   15320 /* special case - for 82575 - need to do manual init ... */
   15321 static void
   15322 wm_reset_init_script_82575(struct wm_softc *sc)
   15323 {
	/*
	 * Remark: this is untested code as we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD driver
	 * for the i82575.
	 */
   15328 
   15329 	/* SerDes configuration via SERDESCTRL */
   15330 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15331 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15332 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15333 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15334 
   15335 	/* CCM configuration via CCMCTL register */
   15336 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15337 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15338 
   15339 	/* PCIe lanes configuration */
   15340 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15341 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15342 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15343 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15344 
   15345 	/* PCIe PLL Configuration */
   15346 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15347 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15348 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15349 }
   15350 
   15351 static void
   15352 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15353 {
   15354 	uint32_t reg;
   15355 	uint16_t nvmword;
   15356 	int rv;
   15357 
   15358 	if (sc->sc_type != WM_T_82580)
   15359 		return;
   15360 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15361 		return;
   15362 
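	/*
	 * The CFG3 word for this port in the NVM records whether the PHY is
	 * reached through the external MDIO interface and/or the common
	 * MDIO pins; a reset may clobber MDICNFG, so mirror the NVM setting
	 * back into the register.
	 */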
   15363 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15364 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15365 	if (rv != 0) {
   15366 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15367 		    __func__);
   15368 		return;
   15369 	}
   15370 
   15371 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15372 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15373 		reg |= MDICNFG_DEST;
   15374 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15375 		reg |= MDICNFG_COM_MDIO;
   15376 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15377 }
   15378 
   15379 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15380 
   15381 static bool
   15382 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15383 {
   15384 	uint32_t reg;
   15385 	uint16_t id1, id2;
   15386 	int i, rv;
   15387 
   15388 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15389 		device_xname(sc->sc_dev), __func__));
   15390 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15391 
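	/*
	 * Try up to twice to read a valid PHY ID: right after the PHY has
	 * been power cycled, an MDIO access can fail or return an
	 * all-zeros/all-ones ID.
	 */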
   15392 	id1 = id2 = 0xffff;
   15393 	for (i = 0; i < 2; i++) {
   15394 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15395 		    &id1);
   15396 		if ((rv != 0) || MII_INVALIDID(id1))
   15397 			continue;
   15398 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15399 		    &id2);
   15400 		if ((rv != 0) || MII_INVALIDID(id2))
   15401 			continue;
   15402 		break;
   15403 	}
   15404 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15405 		goto out;
   15406 
   15407 	/*
   15408 	 * In case the PHY needs to be in mdio slow mode,
   15409 	 * set slow mode and try to get the PHY id again.
   15410 	 */
   15411 	if (sc->sc_type < WM_T_PCH_LPT) {
   15412 		sc->phy.release(sc);
   15413 		wm_set_mdio_slow_mode_hv(sc);
   15414 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   15415 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   15416 		sc->phy.acquire(sc);
   15417 	}
   15418 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
   15420 		return false;
   15421 	}
   15422 out:
   15423 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15424 		/* Only unforce SMBus if ME is not active */
   15425 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15426 			uint16_t phyreg;
   15427 
   15428 			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			if (rv == 0) {
				phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
				    CV_SMB_CTRL, phyreg);
			}
   15434 
   15435 			/* Unforce SMBus mode in MAC */
   15436 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15437 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15438 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15439 		}
   15440 	}
   15441 	return true;
   15442 }
   15443 
   15444 static void
   15445 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15446 {
   15447 	uint32_t reg;
   15448 	int i;
   15449 
   15450 	/* Set PHY Config Counter to 50msec */
   15451 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15452 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15453 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15454 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15455 
   15456 	/* Toggle LANPHYPC */
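	/*
	 * Asserting CTRL_LANPHYPC_OVERRIDE with the value bit clear drives
	 * the LANPHYPC pin low under software control, power cycling the
	 * PHY; dropping the override afterwards returns the pin to hardware
	 * control.
	 */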
   15457 	reg = CSR_READ(sc, WMREG_CTRL);
   15458 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15459 	reg &= ~CTRL_LANPHYPC_VALUE;
   15460 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15461 	CSR_WRITE_FLUSH(sc);
   15462 	delay(1000);
   15463 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15464 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15465 	CSR_WRITE_FLUSH(sc);
   15466 
   15467 	if (sc->sc_type < WM_T_PCH_LPT)
   15468 		delay(50 * 1000);
   15469 	else {
   15470 		i = 20;
   15471 
   15472 		do {
   15473 			delay(5 * 1000);
   15474 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15475 		    && i--);
   15476 
   15477 		delay(30 * 1000);
   15478 	}
   15479 }
   15480 
   15481 static int
   15482 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15483 {
   15484 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15485 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15486 	uint32_t rxa;
   15487 	uint16_t scale = 0, lat_enc = 0;
   15488 	int32_t obff_hwm = 0;
   15489 	int64_t lat_ns, value;
   15490 
   15491 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15492 		device_xname(sc->sc_dev), __func__));
   15493 
   15494 	if (link) {
   15495 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15496 		uint32_t status;
   15497 		uint16_t speed;
   15498 		pcireg_t preg;
   15499 
   15500 		status = CSR_READ(sc, WMREG_STATUS);
   15501 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15502 		case STATUS_SPEED_10:
   15503 			speed = 10;
   15504 			break;
   15505 		case STATUS_SPEED_100:
   15506 			speed = 100;
   15507 			break;
   15508 		case STATUS_SPEED_1000:
   15509 			speed = 1000;
   15510 			break;
   15511 		default:
   15512 			device_printf(sc->sc_dev, "Unknown speed "
   15513 			    "(status = %08x)\n", status);
   15514 			return -1;
   15515 		}
   15516 
   15517 		/* Rx Packet Buffer Allocation size (KB) */
   15518 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15519 
   15520 		/*
   15521 		 * Determine the maximum latency tolerated by the device.
   15522 		 *
   15523 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15524 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15525 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15526 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15527 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15528 		 */
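		/*
		 * Worked example with illustrative numbers: rxa = 24KB,
		 * MTU = 1500 and a 1000Mbps link give
		 * lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *        = 172384.  That exceeds the 10-bit value field, so
		 * the loop below divides by 2^5 twice (172384 -> 5387 ->
		 * 169), yielding scale = 2 (2^10 ns units) and value = 169,
		 * i.e. an encoded latency of 169 * 1024 = 173056 ns.
		 */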
   15529 		lat_ns = ((int64_t)rxa * 1024 -
   15530 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15531 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15532 		if (lat_ns < 0)
   15533 			lat_ns = 0;
   15534 		else
   15535 			lat_ns /= speed;
   15536 		value = lat_ns;
   15537 
   15538 		while (value > LTRV_VALUE) {
			scale++;
   15540 			value = howmany(value, __BIT(5));
   15541 		}
   15542 		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
   15545 			return -1;
   15546 		}
   15547 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15548 
   15549 		/* Determine the maximum latency tolerated by the platform */
   15550 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15551 		    WM_PCI_LTR_CAP_LPT);
   15552 		max_snoop = preg & 0xffff;
   15553 		max_nosnoop = preg >> 16;
   15554 
   15555 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15556 
   15557 		if (lat_enc > max_ltr_enc) {
   15558 			lat_enc = max_ltr_enc;
   15559 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15560 			    * PCI_LTR_SCALETONS(
   15561 				    __SHIFTOUT(lat_enc,
   15562 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15563 		}
   15564 
   15565 		if (lat_ns) {
   15566 			lat_ns *= speed * 1000;
   15567 			lat_ns /= 8;
   15568 			lat_ns /= 1000000000;
   15569 			obff_hwm = (int32_t)(rxa - lat_ns);
   15570 		}
   15571 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   15574 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15575 			return -1;
   15576 		}
   15577 	}
   15578 	/* Snoop and No-Snoop latencies the same */
   15579 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15580 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15581 
   15582 	/* Set OBFF high water mark */
   15583 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15584 	reg |= obff_hwm;
   15585 	CSR_WRITE(sc, WMREG_SVT, reg);
   15586 
   15587 	/* Enable OBFF */
   15588 	reg = CSR_READ(sc, WMREG_SVCR);
   15589 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15590 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15591 
   15592 	return 0;
   15593 }
   15594 
   15595 /*
   15596  * I210 Errata 25 and I211 Errata 10
   15597  * Slow System Clock.
   15598  */
   15599 static void
   15600 wm_pll_workaround_i210(struct wm_softc *sc)
   15601 {
   15602 	uint32_t mdicnfg, wuc;
   15603 	uint32_t reg;
   15604 	pcireg_t pcireg;
   15605 	uint32_t pmreg;
   15606 	uint16_t nvmword, tmp_nvmword;
   15607 	int phyval;
   15608 	bool wa_done = false;
   15609 	int i;
   15610 
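	/*
	 * If the PHY PLL came up unconfigured because of a slow system
	 * clock, reset the PHY, rewrite the iNVM autoload word with the PLL
	 * workaround value, and bounce the device through D3/D0 so that the
	 * autoload is re-run, retrying up to WM_MAX_PLL_TRIES times.
	 */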
	/* Get the Power Management cap offset before touching any register */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

   15626 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15627 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15628 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15629 
   15630 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15631 			break; /* OK */
   15632 		}
   15633 
   15634 		wa_done = true;
   15635 		/* Directly reset the internal PHY */
   15636 		reg = CSR_READ(sc, WMREG_CTRL);
   15637 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15638 
   15639 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15640 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15641 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15642 
   15643 		CSR_WRITE(sc, WMREG_WUC, 0);
   15644 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15645 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15646 
   15647 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15648 		    pmreg + PCI_PMCSR);
   15649 		pcireg |= PCI_PMCSR_STATE_D3;
   15650 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15651 		    pmreg + PCI_PMCSR, pcireg);
   15652 		delay(1000);
   15653 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15654 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15655 		    pmreg + PCI_PMCSR, pcireg);
   15656 
   15657 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15658 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15659 
   15660 		/* Restore WUC register */
   15661 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15662 	}
   15663 
   15664 	/* Restore MDICNFG setting */
   15665 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15666 	if (wa_done)
   15667 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15668 }
   15669 
   15670 static void
   15671 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15672 {
   15673 	uint32_t reg;
   15674 
   15675 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15676 		device_xname(sc->sc_dev), __func__));
   15677 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15678 	    || (sc->sc_type == WM_T_PCH_CNP));
   15679 
   15680 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15681 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15682 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15683 
   15684 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15685 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15686 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15687 }
   15688