/*	$NetBSD: if_wm.c,v 1.617 2019/01/22 03:42:27 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.617 2019/01/22 03:42:27 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

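/*
 * A hedged sketch (not the driver's actual transmit path) of the
 * m_defrag() fallback described above: when an mbuf chain needs more
 * than WM_NTXSEGS DMA segments, bus_dmamap_load_mbuf() fails with
 * EFBIG, so the chain is compacted and the load retried once.
 */
#if 0
static int
wm_load_mbuf_sketch(bus_dma_tag_t dmat, bus_dmamap_t dmamap, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf(dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments; compact the chain and retry once. */
		m = m_defrag(m0, M_NOWAIT);
		if (m == NULL)
			return error;
		error = bus_dmamap_load_mbuf(dmat, dmamap, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	return error;
}
#endif
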
/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

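/*
 * The unions above let a single DMA allocation back whichever
 * descriptor layout the MAC uses; {t,r}xq_descsize selects the stride
 * at run time.  A hedged sketch of that selection (the exact chip test
 * is illustrative; the real logic lives in the allocation routines):
 */
#if 0
static void
wm_rx_descsize_sketch(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
	if (sc->sc_type == WM_T_82574)
		rxq->rxq_descsize = sizeof(ext_rxdesc_t);  /* 82574 only */
	else if (sc->sc_type >= WM_T_82575)	/* illustrative ordering */
		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
	else
		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
}
#endif
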
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

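/*
 * wm_82580_rxpbs_table[] is indexed by a register field, so reads must
 * be bounds checked.  A hedged sketch of the lookup performed by
 * wm_rxpbs_adjust_82580() (declared below):
 */
#if 0
static uint32_t
wm_rxpbs_adjust_82580_sketch(uint32_t val)
{

	if (val < __arraycount(wm_82580_rxpbs_table))
		return wm_82580_rxpbs_table[val];
	return 0;
}
#endif
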
struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

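/*
 * For reference, WM_Q_EVCNT_DEFINE(txq, txdw) expands to
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is deliberately not pasted; it only sizes the
 * buffer), and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname)
 * formats the counter name as "txq00txdw" before handing it to
 * evcnt_attach_dynamic().
 */
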
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

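/*
 * Usage sketch: paths that mutate shared softc state bracket the work
 * with the core lock, and *_locked() helpers assert ownership:
 *
 *	WM_CORE_LOCK(sc);
 *	... update sc_if_flags, multicast filter, media state ...
 *	WM_CORE_UNLOCK(sc);
 *
 *	KASSERT(WM_CORE_LOCKED(sc));
 *
 * The NULL check makes the macros no-ops before the lock is created.
 */
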
#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

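/*
 * A small sketch of how the chain macros above give O(1) append while
 * a multi-buffer packet is being assembled (illustrative, not the
 * actual wm_rxeof() code):
 */
#if 0
static void
wm_rxchain_sketch(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(rxq);		/* start a fresh packet */
	WM_RXCHAIN_LINK(rxq, m1);	/* first 2k buffer */
	WM_RXCHAIN_LINK(rxq, m2);	/* continuation buffer */
	/* rxq_head now heads the chain; rxq_tailp is the append point. */
}
#endif
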
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

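/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out by reading STATUS.
 * A hedged sketch of the common read-modify-write-flush pattern
 * (WMREG_CTRL and CTRL_SLU come from if_wmreg.h):
 */
#if 0
static void
wm_set_link_up_sketch(struct wm_softc *sc)
{
	uint32_t ctrl;

	ctrl = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);	/* make sure the write reached the MAC */
}
#endif
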
#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

/*
 * Register read/write functions other than CSR_{READ,WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1322 	  "82567LM-3 LAN Controller",
   1323 	  WM_T_ICH10,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1325 	  "82567LF-3 LAN Controller",
   1326 	  WM_T_ICH10,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1328 	  "82567V-2 LAN Controller",
   1329 	  WM_T_ICH10,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1331 	  "82567V-3? LAN Controller",
   1332 	  WM_T_ICH10,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1334 	  "HANKSVILLE LAN Controller",
   1335 	  WM_T_ICH10,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1337 	  "PCH LAN (82577LM) Controller",
   1338 	  WM_T_PCH,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1340 	  "PCH LAN (82577LC) Controller",
   1341 	  WM_T_PCH,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1343 	  "PCH LAN (82578DM) Controller",
   1344 	  WM_T_PCH,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1346 	  "PCH LAN (82578DC) Controller",
   1347 	  WM_T_PCH,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1349 	  "PCH2 LAN (82579LM) Controller",
   1350 	  WM_T_PCH2,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1352 	  "PCH2 LAN (82579V) Controller",
   1353 	  WM_T_PCH2,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1355 	  "82575EB dual-1000baseT Ethernet",
   1356 	  WM_T_82575,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1358 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1359 	  WM_T_82575,		WMP_F_SERDES },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1361 	  "82575GB quad-1000baseT Ethernet",
   1362 	  WM_T_82575,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1364 	  "82575GB quad-1000baseT Ethernet (PM)",
   1365 	  WM_T_82575,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1367 	  "82576 1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1370 	  "82576 1000BaseX Ethernet",
   1371 	  WM_T_82576,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1378 	  "82576 quad-1000BaseT Ethernet",
   1379 	  WM_T_82576,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1382 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1383 	  WM_T_82576,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1386 	  "82576 gigabit Ethernet",
   1387 	  WM_T_82576,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1393 	  "82576 quad-gigabit Ethernet (SERDES)",
   1394 	  WM_T_82576,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1397 	  "82580 1000BaseT Ethernet",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1400 	  "82580 1000BaseX Ethernet",
   1401 	  WM_T_82580,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1404 	  "82580 1000BaseT Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1408 	  "82580 gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1411 	  "82580 dual-1000BaseT Ethernet",
   1412 	  WM_T_82580,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1415 	  "82580 quad-1000BaseX Ethernet",
   1416 	  WM_T_82580,		WMP_F_FIBER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1419 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1420 	  WM_T_82580,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1423 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1424 	  WM_T_82580,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1427 	  "DH89XXCC 1000BASE-KX Ethernet",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1431 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1432 	  WM_T_82580,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1435 	  "I350 Gigabit Network Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1439 	  "I350 Gigabit Fiber Network Connection",
   1440 	  WM_T_I350,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1443 	  "I350 Gigabit Backplane Connection",
   1444 	  WM_T_I350,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1447 	  "I350 Quad Port Gigabit Ethernet",
   1448 	  WM_T_I350,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1451 	  "I350 Gigabit Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1455 	  "I354 Gigabit Ethernet (KX)",
   1456 	  WM_T_I354,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1459 	  "I354 Gigabit Ethernet (SGMII)",
   1460 	  WM_T_I354,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1463 	  "I354 Gigabit Ethernet (2.5G)",
   1464 	  WM_T_I354,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1467 	  "I210-T1 Ethernet Server Adapter",
   1468 	  WM_T_I210,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1471 	  "I210 Ethernet (Copper OEM)",
   1472 	  WM_T_I210,		WMP_F_COPPER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1475 	  "I210 Ethernet (Copper IT)",
   1476 	  WM_T_I210,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1479 	  "I210 Ethernet (FLASH less)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1483 	  "I210 Gigabit Ethernet (Fiber)",
   1484 	  WM_T_I210,		WMP_F_FIBER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1487 	  "I210 Gigabit Ethernet (SERDES)",
   1488 	  WM_T_I210,		WMP_F_SERDES },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1491 	  "I210 Gigabit Ethernet (FLASH less)",
   1492 	  WM_T_I210,		WMP_F_SERDES },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1495 	  "I210 Gigabit Ethernet (SGMII)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1499 	  "I211 Ethernet (COPPER)",
   1500 	  WM_T_I211,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1502 	  "I217 V Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1505 	  "I217 LM Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1508 	  "I218 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1517 	  "I218 LM Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1526 	  "I219 V Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1553 	  "I219 V Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ 0,			0,
   1565 	  NULL,
   1566 	  0,			0 },
   1567 };
   1568 
   1569 /*
   1570  * Register read/write functions.
   1571  * Other than CSR_{READ|WRITE}().
   1572  */
   1573 
   1574 #if 0 /* Not currently used */
   1575 static inline uint32_t
   1576 wm_io_read(struct wm_softc *sc, int reg)
   1577 {
   1578 
   1579 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1580 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1581 }
   1582 #endif
   1583 
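/*
 * Write VAL to register REG using I/O-mapped indirect access: the
 * register offset goes into the IOADDR window at offset 0 of the I/O
 * BAR, and the data goes into the IODATA window at offset 4.
 */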
   1584 static inline void
   1585 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1586 {
   1587 
   1588 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1589 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1590 }
   1591 
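/*
 * Write an 8-bit value to offset OFF of an embedded controller behind
 * the indirect register REG, then poll until the hardware sets the
 * ready bit or the timeout expires.
 */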
   1592 static inline void
   1593 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1594     uint32_t data)
   1595 {
   1596 	uint32_t regval;
   1597 	int i;
   1598 
   1599 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1600 
   1601 	CSR_WRITE(sc, reg, regval);
   1602 
   1603 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1604 		delay(5);
   1605 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1606 			break;
   1607 	}
   1608 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1609 		aprint_error("%s: WARNING:"
   1610 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1611 		    device_xname(sc->sc_dev), reg);
   1612 	}
   1613 }
   1614 
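/*
 * Store the DMA address V into a descriptor address field as two
 * little-endian 32-bit words; the high word is zero when bus_addr_t
 * is only 32 bits wide.
 */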
   1615 static inline void
   1616 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1617 {
   1618 	wa->wa_low = htole32(v & 0xffffffffU);
   1619 	if (sizeof(bus_addr_t) == 8)
   1620 		wa->wa_high = htole32((uint64_t) v >> 32);
   1621 	else
   1622 		wa->wa_high = 0;
   1623 }
   1624 
   1625 /*
   1626  * Descriptor sync/init functions.
   1627  */
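/*
 * Sync NUM transmit descriptors starting at START, taking care of
 * wrap-around at the end of the descriptor ring.
 */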
   1628 static inline void
   1629 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1630 {
   1631 	struct wm_softc *sc = txq->txq_sc;
   1632 
   1633 	/* If it will wrap around, sync to the end of the ring. */
   1634 	if ((start + num) > WM_NTXDESC(txq)) {
   1635 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1636 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1637 		    (WM_NTXDESC(txq) - start), ops);
   1638 		num -= (WM_NTXDESC(txq) - start);
   1639 		start = 0;
   1640 	}
   1641 
   1642 	/* Now sync whatever is left. */
   1643 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1644 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1645 }
   1646 
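/* Sync the receive descriptor at START. */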
   1647 static inline void
   1648 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1649 {
   1650 	struct wm_softc *sc = rxq->rxq_sc;
   1651 
   1652 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1653 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1654 }
   1655 
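/*
 * (Re)initialize the receive descriptor at START with the mbuf's DMA
 * address and hand it back to the chip by advancing the ring tail.
 */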
   1656 static inline void
   1657 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1658 {
   1659 	struct wm_softc *sc = rxq->rxq_sc;
   1660 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1661 	struct mbuf *m = rxs->rxs_mbuf;
   1662 
   1663 	/*
   1664 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1665 	 * so that the payload after the Ethernet header is aligned
   1666 	 * to a 4-byte boundary.
	 *
   1668 	 * XXX BRAINDAMAGE ALERT!
   1669 	 * The stupid chip uses the same size for every buffer, which
   1670 	 * is set in the Receive Control register.  We are using the 2K
   1671 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1672 	 * reason, we can't "scoot" packets longer than the standard
   1673 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1674 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1675 	 * the upper layer copy the headers.
   1676 	 */
   1677 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1678 
   1679 	if (sc->sc_type == WM_T_82574) {
   1680 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1681 		rxd->erx_data.erxd_addr =
   1682 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1683 		rxd->erx_data.erxd_dd = 0;
   1684 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1685 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1686 
   1687 		rxd->nqrx_data.nrxd_paddr =
   1688 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1689 		/* Currently, split header is not supported. */
   1690 		rxd->nqrx_data.nrxd_haddr = 0;
   1691 	} else {
   1692 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1693 
   1694 		wm_set_dma_addr(&rxd->wrx_addr,
   1695 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1696 		rxd->wrx_len = 0;
   1697 		rxd->wrx_cksum = 0;
   1698 		rxd->wrx_status = 0;
   1699 		rxd->wrx_errors = 0;
   1700 		rxd->wrx_special = 0;
   1701 	}
   1702 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1703 
   1704 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1705 }
   1706 
   1707 /*
   1708  * Device driver interface functions and commonly used functions.
   1709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1710  */
   1711 
/* Look up a device in the supported device table */
   1713 static const struct wm_product *
   1714 wm_lookup(const struct pci_attach_args *pa)
   1715 {
   1716 	const struct wm_product *wmp;
   1717 
   1718 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1719 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1720 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1721 			return wmp;
   1722 	}
   1723 	return NULL;
   1724 }
   1725 
   1726 /* The match function (ca_match) */
   1727 static int
   1728 wm_match(device_t parent, cfdata_t cf, void *aux)
   1729 {
   1730 	struct pci_attach_args *pa = aux;
   1731 
   1732 	if (wm_lookup(pa) != NULL)
   1733 		return 1;
   1734 
   1735 	return 0;
   1736 }
   1737 
   1738 /* The attach function (ca_attach) */
   1739 static void
   1740 wm_attach(device_t parent, device_t self, void *aux)
   1741 {
   1742 	struct wm_softc *sc = device_private(self);
   1743 	struct pci_attach_args *pa = aux;
   1744 	prop_dictionary_t dict;
   1745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1746 	pci_chipset_tag_t pc = pa->pa_pc;
   1747 	int counts[PCI_INTR_TYPE_SIZE];
   1748 	pci_intr_type_t max_type;
   1749 	const char *eetype, *xname;
   1750 	bus_space_tag_t memt;
   1751 	bus_space_handle_t memh;
   1752 	bus_size_t memsize;
   1753 	int memh_valid;
   1754 	int i, error;
   1755 	const struct wm_product *wmp;
   1756 	prop_data_t ea;
   1757 	prop_number_t pn;
   1758 	uint8_t enaddr[ETHER_ADDR_LEN];
   1759 	char buf[256];
   1760 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1761 	pcireg_t preg, memtype;
   1762 	uint16_t eeprom_data, apme_mask;
   1763 	bool force_clear_smbi;
   1764 	uint32_t link_mode;
   1765 	uint32_t reg;
   1766 
   1767 	sc->sc_dev = self;
   1768 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1769 	sc->sc_core_stopping = false;
   1770 
   1771 	wmp = wm_lookup(pa);
   1772 #ifdef DIAGNOSTIC
   1773 	if (wmp == NULL) {
   1774 		printf("\n");
   1775 		panic("wm_attach: impossible");
   1776 	}
   1777 #endif
   1778 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1779 
   1780 	sc->sc_pc = pa->pa_pc;
   1781 	sc->sc_pcitag = pa->pa_tag;
   1782 
   1783 	if (pci_dma64_available(pa))
   1784 		sc->sc_dmat = pa->pa_dmat64;
   1785 	else
   1786 		sc->sc_dmat = pa->pa_dmat;
   1787 
   1788 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1789 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1790 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1791 
   1792 	sc->sc_type = wmp->wmp_type;
   1793 
   1794 	/* Set default function pointers */
   1795 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1796 	sc->phy.release = sc->nvm.release = wm_put_null;
   1797 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1798 
   1799 	if (sc->sc_type < WM_T_82543) {
   1800 		if (sc->sc_rev < 2) {
   1801 			aprint_error_dev(sc->sc_dev,
   1802 			    "i82542 must be at least rev. 2\n");
   1803 			return;
   1804 		}
   1805 		if (sc->sc_rev < 3)
   1806 			sc->sc_type = WM_T_82542_2_0;
   1807 	}
   1808 
   1809 	/*
   1810 	 * Disable MSI for Errata:
   1811 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1812 	 *
   1813 	 *  82544: Errata 25
   1814 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1815 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1816 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1817 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1818 	 *
   1819 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1820 	 *
   1821 	 *  82571 & 82572: Errata 63
   1822 	 */
   1823 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1824 	    || (sc->sc_type == WM_T_82572))
   1825 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1826 
   1827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1828 	    || (sc->sc_type == WM_T_82580)
   1829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1831 		sc->sc_flags |= WM_F_NEWQUEUE;
   1832 
   1833 	/* Set device properties (mactype) */
   1834 	dict = device_properties(sc->sc_dev);
   1835 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1836 
   1837 	/*
	 * Map the device.  All devices support memory-mapped access,
   1839 	 * and it is really required for normal operation.
   1840 	 */
   1841 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1842 	switch (memtype) {
   1843 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1844 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1845 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1846 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1847 		break;
   1848 	default:
   1849 		memh_valid = 0;
   1850 		break;
   1851 	}
   1852 
   1853 	if (memh_valid) {
   1854 		sc->sc_st = memt;
   1855 		sc->sc_sh = memh;
   1856 		sc->sc_ss = memsize;
   1857 	} else {
   1858 		aprint_error_dev(sc->sc_dev,
   1859 		    "unable to map device registers\n");
   1860 		return;
   1861 	}
   1862 
   1863 	/*
   1864 	 * In addition, i82544 and later support I/O mapped indirect
   1865 	 * register access.  It is not desirable (nor supported in
   1866 	 * this driver) to use it for normal operation, though it is
   1867 	 * required to work around bugs in some chip versions.
   1868 	 */
   1869 	if (sc->sc_type >= WM_T_82544) {
   1870 		/* First we have to find the I/O BAR. */
   1871 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1872 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1873 			if (memtype == PCI_MAPREG_TYPE_IO)
   1874 				break;
   1875 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1876 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1877 				i += 4;	/* skip high bits, too */
   1878 		}
   1879 		if (i < PCI_MAPREG_END) {
   1880 			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's no problem, because the newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
   1889 			 */
   1890 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1891 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "WARNING: I/O BAR at zero.\n");
   1894 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1895 					0, &sc->sc_iot, &sc->sc_ioh,
   1896 					NULL, &sc->sc_ios) == 0) {
   1897 				sc->sc_flags |= WM_F_IOH_VALID;
   1898 			} else
   1899 				aprint_error_dev(sc->sc_dev,
   1900 				    "WARNING: unable to map I/O space\n");
   1901 		}
   1902 
   1903 	}
   1904 
   1905 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1906 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1907 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1908 	if (sc->sc_type < WM_T_82542_2_1)
   1909 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1910 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1911 
   1912 	/* power up chip */
   1913 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1914 	    && error != EOPNOTSUPP) {
   1915 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1916 		return;
   1917 	}
   1918 
   1919 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1920 	/*
	 * If we can use only one queue, don't use MSI-X, to save
	 * interrupt resources.
   1923 	 */
   1924 	if (sc->sc_nqueues > 1) {
   1925 		max_type = PCI_INTR_TYPE_MSIX;
   1926 		/*
		 * The 82583 has an MSI-X capability in the PCI configuration
		 * space, but it doesn't support it. At least the datasheet
		 * doesn't say anything about MSI-X.
   1930 		 */
   1931 		counts[PCI_INTR_TYPE_MSIX]
   1932 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1933 	} else {
   1934 		max_type = PCI_INTR_TYPE_MSI;
   1935 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 	}
   1937 
   1938 	/* Allocation settings */
   1939 	counts[PCI_INTR_TYPE_MSI] = 1;
   1940 	counts[PCI_INTR_TYPE_INTX] = 1;
   1941 	/* overridden by disable flags */
   1942 	if (wm_disable_msi != 0) {
   1943 		counts[PCI_INTR_TYPE_MSI] = 0;
   1944 		if (wm_disable_msix != 0) {
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1947 		}
   1948 	} else if (wm_disable_msix != 0) {
   1949 		max_type = PCI_INTR_TYPE_MSI;
   1950 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1951 	}
   1952 
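	/*
	 * Try to allocate interrupts in the order MSI-X, MSI, INTx,
	 * falling back to the next type whenever setup fails.
	 */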
   1953 alloc_retry:
   1954 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1955 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1956 		return;
   1957 	}
   1958 
   1959 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1960 		error = wm_setup_msix(sc);
   1961 		if (error) {
   1962 			pci_intr_release(pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_MSIX]);
   1964 
   1965 			/* Setup for MSI: Disable MSI-X */
   1966 			max_type = PCI_INTR_TYPE_MSI;
   1967 			counts[PCI_INTR_TYPE_MSI] = 1;
   1968 			counts[PCI_INTR_TYPE_INTX] = 1;
   1969 			goto alloc_retry;
   1970 		}
   1971 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1972 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1973 		error = wm_setup_legacy(sc);
   1974 		if (error) {
   1975 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1976 			    counts[PCI_INTR_TYPE_MSI]);
   1977 
   1978 			/* The next try is for INTx: Disable MSI */
   1979 			max_type = PCI_INTR_TYPE_INTX;
   1980 			counts[PCI_INTR_TYPE_INTX] = 1;
   1981 			goto alloc_retry;
   1982 		}
   1983 	} else {
   1984 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1985 		error = wm_setup_legacy(sc);
   1986 		if (error) {
   1987 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1988 			    counts[PCI_INTR_TYPE_INTX]);
   1989 			return;
   1990 		}
   1991 	}
   1992 
   1993 	/*
   1994 	 * Check the function ID (unit number of the chip).
   1995 	 */
   1996 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1997 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1998 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1999 	    || (sc->sc_type == WM_T_82580)
   2000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2001 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2002 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2003 	else
   2004 		sc->sc_funcid = 0;
   2005 
   2006 	/*
   2007 	 * Determine a few things about the bus we're connected to.
   2008 	 */
   2009 	if (sc->sc_type < WM_T_82543) {
   2010 		/* We don't really know the bus characteristics here. */
   2011 		sc->sc_bus_speed = 33;
   2012 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2013 		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
   2016 		 */
   2017 		sc->sc_flags |= WM_F_CSA;
   2018 		sc->sc_bus_speed = 66;
   2019 		aprint_verbose_dev(sc->sc_dev,
   2020 		    "Communication Streaming Architecture\n");
   2021 		if (sc->sc_type == WM_T_82547) {
   2022 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2023 			callout_setfunc(&sc->sc_txfifo_ch,
   2024 			    wm_82547_txfifo_stall, sc);
   2025 			aprint_verbose_dev(sc->sc_dev,
   2026 			    "using 82547 Tx FIFO stall work-around\n");
   2027 		}
   2028 	} else if (sc->sc_type >= WM_T_82571) {
   2029 		sc->sc_flags |= WM_F_PCIE;
   2030 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2031 		    && (sc->sc_type != WM_T_ICH10)
   2032 		    && (sc->sc_type != WM_T_PCH)
   2033 		    && (sc->sc_type != WM_T_PCH2)
   2034 		    && (sc->sc_type != WM_T_PCH_LPT)
   2035 		    && (sc->sc_type != WM_T_PCH_SPT)
   2036 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2037 			/* ICH* and PCH* have no PCIe capability registers */
   2038 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2039 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2040 				NULL) == 0)
   2041 				aprint_error_dev(sc->sc_dev,
   2042 				    "unable to find PCIe capability\n");
   2043 		}
   2044 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2045 	} else {
   2046 		reg = CSR_READ(sc, WMREG_STATUS);
   2047 		if (reg & STATUS_BUS64)
   2048 			sc->sc_flags |= WM_F_BUS64;
   2049 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2050 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2051 
   2052 			sc->sc_flags |= WM_F_PCIX;
   2053 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2054 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "unable to find PCIX capability\n");
   2057 			else if (sc->sc_type != WM_T_82545_3 &&
   2058 				 sc->sc_type != WM_T_82546_3) {
   2059 				/*
   2060 				 * Work around a problem caused by the BIOS
   2061 				 * setting the max memory read byte count
   2062 				 * incorrectly.
   2063 				 */
   2064 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2065 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2066 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2067 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2068 
   2069 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2070 				    PCIX_CMD_BYTECNT_SHIFT;
   2071 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2072 				    PCIX_STATUS_MAXB_SHIFT;
   2073 				if (bytecnt > maxb) {
   2074 					aprint_verbose_dev(sc->sc_dev,
   2075 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2076 					    512 << bytecnt, 512 << maxb);
   2077 					pcix_cmd = (pcix_cmd &
   2078 					    ~PCIX_CMD_BYTECNT_MASK) |
   2079 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2080 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2081 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2082 					    pcix_cmd);
   2083 				}
   2084 			}
   2085 		}
   2086 		/*
   2087 		 * The quad port adapter is special; it has a PCIX-PCIX
   2088 		 * bridge on the board, and can run the secondary bus at
   2089 		 * a higher speed.
   2090 		 */
   2091 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2092 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2093 								      : 66;
   2094 		} else if (sc->sc_flags & WM_F_PCIX) {
   2095 			switch (reg & STATUS_PCIXSPD_MASK) {
   2096 			case STATUS_PCIXSPD_50_66:
   2097 				sc->sc_bus_speed = 66;
   2098 				break;
   2099 			case STATUS_PCIXSPD_66_100:
   2100 				sc->sc_bus_speed = 100;
   2101 				break;
   2102 			case STATUS_PCIXSPD_100_133:
   2103 				sc->sc_bus_speed = 133;
   2104 				break;
   2105 			default:
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2108 				    reg & STATUS_PCIXSPD_MASK);
   2109 				sc->sc_bus_speed = 66;
   2110 				break;
   2111 			}
   2112 		} else
   2113 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2114 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2115 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2116 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2117 	}
   2118 
   2119 	/* clear interesting stat counters */
   2120 	CSR_READ(sc, WMREG_COLC);
   2121 	CSR_READ(sc, WMREG_RXERRC);
   2122 
   2123 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2124 	    || (sc->sc_type >= WM_T_ICH8))
   2125 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2126 	if (sc->sc_type >= WM_T_ICH8)
   2127 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2128 
   2129 	/* Set PHY, NVM mutex related stuff */
   2130 	switch (sc->sc_type) {
   2131 	case WM_T_82542_2_0:
   2132 	case WM_T_82542_2_1:
   2133 	case WM_T_82543:
   2134 	case WM_T_82544:
   2135 		/* Microwire */
   2136 		sc->nvm.read = wm_nvm_read_uwire;
   2137 		sc->sc_nvm_wordsize = 64;
   2138 		sc->sc_nvm_addrbits = 6;
   2139 		break;
   2140 	case WM_T_82540:
   2141 	case WM_T_82545:
   2142 	case WM_T_82545_3:
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 		/* Microwire */
   2146 		sc->nvm.read = wm_nvm_read_uwire;
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		if (reg & EECD_EE_SIZE) {
   2149 			sc->sc_nvm_wordsize = 256;
   2150 			sc->sc_nvm_addrbits = 8;
   2151 		} else {
   2152 			sc->sc_nvm_wordsize = 64;
   2153 			sc->sc_nvm_addrbits = 6;
   2154 		}
   2155 		sc->sc_flags |= WM_F_LOCK_EECD;
   2156 		sc->nvm.acquire = wm_get_eecd;
   2157 		sc->nvm.release = wm_put_eecd;
   2158 		break;
   2159 	case WM_T_82541:
   2160 	case WM_T_82541_2:
   2161 	case WM_T_82547:
   2162 	case WM_T_82547_2:
   2163 		reg = CSR_READ(sc, WMREG_EECD);
   2164 		/*
		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
		 * 8254[17], so set the flags and functions before calling it.
   2167 		 */
   2168 		sc->sc_flags |= WM_F_LOCK_EECD;
   2169 		sc->nvm.acquire = wm_get_eecd;
   2170 		sc->nvm.release = wm_put_eecd;
   2171 		if (reg & EECD_EE_TYPE) {
   2172 			/* SPI */
   2173 			sc->nvm.read = wm_nvm_read_spi;
   2174 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 		} else {
   2177 			/* Microwire */
   2178 			sc->nvm.read = wm_nvm_read_uwire;
   2179 			if ((reg & EECD_EE_ABITS) != 0) {
   2180 				sc->sc_nvm_wordsize = 256;
   2181 				sc->sc_nvm_addrbits = 8;
   2182 			} else {
   2183 				sc->sc_nvm_wordsize = 64;
   2184 				sc->sc_nvm_addrbits = 6;
   2185 			}
   2186 		}
   2187 		break;
   2188 	case WM_T_82571:
   2189 	case WM_T_82572:
   2190 		/* SPI */
   2191 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2193 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 		wm_nvm_set_addrbits_size_eecd(sc);
   2195 		sc->phy.acquire = wm_get_swsm_semaphore;
   2196 		sc->phy.release = wm_put_swsm_semaphore;
   2197 		sc->nvm.acquire = wm_get_nvm_82571;
   2198 		sc->nvm.release = wm_put_nvm_82571;
   2199 		break;
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2205 		if (sc->sc_type == WM_T_82573) {
   2206 			sc->phy.acquire = wm_get_swsm_semaphore;
   2207 			sc->phy.release = wm_put_swsm_semaphore;
   2208 			sc->nvm.acquire = wm_get_nvm_82571;
   2209 			sc->nvm.release = wm_put_nvm_82571;
   2210 		} else {
   2211 			/* Both PHY and NVM use the same semaphore. */
   2212 			sc->phy.acquire = sc->nvm.acquire
   2213 			    = wm_get_swfwhw_semaphore;
   2214 			sc->phy.release = sc->nvm.release
   2215 			    = wm_put_swfwhw_semaphore;
   2216 		}
   2217 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2218 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 			sc->sc_nvm_wordsize = 2048;
   2220 		} else {
   2221 			/* SPI */
   2222 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 			wm_nvm_set_addrbits_size_eecd(sc);
   2224 		}
   2225 		break;
   2226 	case WM_T_82575:
   2227 	case WM_T_82576:
   2228 	case WM_T_82580:
   2229 	case WM_T_I350:
   2230 	case WM_T_I354:
   2231 	case WM_T_80003:
   2232 		/* SPI */
   2233 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2234 		wm_nvm_set_addrbits_size_eecd(sc);
   2235 		if ((sc->sc_type == WM_T_80003)
   2236 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2237 			sc->nvm.read = wm_nvm_read_eerd;
   2238 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2239 		} else {
   2240 			sc->nvm.read = wm_nvm_read_spi;
   2241 			sc->sc_flags |= WM_F_LOCK_EECD;
   2242 		}
   2243 		sc->phy.acquire = wm_get_phy_82575;
   2244 		sc->phy.release = wm_put_phy_82575;
   2245 		sc->nvm.acquire = wm_get_nvm_80003;
   2246 		sc->nvm.release = wm_put_nvm_80003;
   2247 		break;
   2248 	case WM_T_ICH8:
   2249 	case WM_T_ICH9:
   2250 	case WM_T_ICH10:
   2251 	case WM_T_PCH:
   2252 	case WM_T_PCH2:
   2253 	case WM_T_PCH_LPT:
   2254 		sc->nvm.read = wm_nvm_read_ich8;
   2255 		/* FLASH */
   2256 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2257 		sc->sc_nvm_wordsize = 2048;
   2258 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2259 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2260 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2261 			aprint_error_dev(sc->sc_dev,
   2262 			    "can't map FLASH registers\n");
   2263 			goto out;
   2264 		}
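		/*
		 * GFPREG describes the gigabit NVM region of the flash:
		 * the low bits hold the first sector of the region and
		 * the bits starting at 16 hold the last sector. Convert
		 * sectors to a byte base, then derive the size of one of
		 * the two banks in 16-bit words.
		 */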
   2265 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2266 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2267 		    ICH_FLASH_SECTOR_SIZE;
   2268 		sc->sc_ich8_flash_bank_size =
   2269 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2270 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2271 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2272 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2273 		sc->sc_flashreg_offset = 0;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_PCH_SPT:
   2280 	case WM_T_PCH_CNP:
   2281 		sc->nvm.read = wm_nvm_read_spt;
   2282 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2283 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 		sc->sc_flasht = sc->sc_st;
   2285 		sc->sc_flashh = sc->sc_sh;
   2286 		sc->sc_ich8_flash_base = 0;
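		/*
		 * The NVM size is strapped: bits 1..5 of WMREG_STRAP
		 * encode it as (n + 1) * NVM_SIZE_MULTIPLIER bytes.
		 */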
   2287 		sc->sc_nvm_wordsize =
   2288 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2289 		    * NVM_SIZE_MULTIPLIER;
		/* It's the size in bytes; we want it in words */
   2291 		sc->sc_nvm_wordsize /= 2;
   2292 		/* assume 2 banks */
   2293 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2294 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2295 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2296 		sc->phy.release = wm_put_swflag_ich8lan;
   2297 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2298 		sc->nvm.release = wm_put_nvm_ich8lan;
   2299 		break;
   2300 	case WM_T_I210:
   2301 	case WM_T_I211:
		/* Allow one clear of the SW semaphore on I210 and newer */
   2303 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2304 		if (wm_nvm_flash_presence_i210(sc)) {
   2305 			sc->nvm.read = wm_nvm_read_eerd;
   2306 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			sc->nvm.read = wm_nvm_read_invm;
   2311 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2312 			sc->sc_nvm_wordsize = INVM_SIZE;
   2313 		}
   2314 		sc->phy.acquire = wm_get_phy_82575;
   2315 		sc->phy.release = wm_put_phy_82575;
   2316 		sc->nvm.acquire = wm_get_nvm_80003;
   2317 		sc->nvm.release = wm_put_nvm_80003;
   2318 		break;
   2319 	default:
   2320 		break;
   2321 	}
   2322 
   2323 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2324 	switch (sc->sc_type) {
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 		reg = CSR_READ(sc, WMREG_SWSM2);
   2328 		if ((reg & SWSM2_LOCK) == 0) {
   2329 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2330 			force_clear_smbi = true;
   2331 		} else
   2332 			force_clear_smbi = false;
   2333 		break;
   2334 	case WM_T_82573:
   2335 	case WM_T_82574:
   2336 	case WM_T_82583:
   2337 		force_clear_smbi = true;
   2338 		break;
   2339 	default:
   2340 		force_clear_smbi = false;
   2341 		break;
   2342 	}
   2343 	if (force_clear_smbi) {
   2344 		reg = CSR_READ(sc, WMREG_SWSM);
   2345 		if ((reg & SWSM_SMBI) != 0)
   2346 			aprint_error_dev(sc->sc_dev,
   2347 			    "Please update the Bootagent\n");
   2348 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2349 	}
   2350 
   2351 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2353 	 * This allows the EEPROM type to be printed correctly in the case
   2354 	 * that no EEPROM is attached.
   2355 	 */
   2356 	/*
   2357 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2358 	 * this for later, so we can fail future reads from the EEPROM.
   2359 	 */
   2360 	if (wm_nvm_validate_checksum(sc)) {
   2361 		/*
		 * Read it again, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
   2364 		 */
   2365 		if (wm_nvm_validate_checksum(sc))
   2366 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2367 	}
   2368 
   2369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2370 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2371 	else {
   2372 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2373 		    sc->sc_nvm_wordsize);
   2374 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2375 			aprint_verbose("iNVM");
   2376 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2377 			aprint_verbose("FLASH(HW)");
   2378 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2379 			aprint_verbose("FLASH");
   2380 		else {
   2381 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2382 				eetype = "SPI";
   2383 			else
   2384 				eetype = "MicroWire";
   2385 			aprint_verbose("(%d address bits) %s EEPROM",
   2386 			    sc->sc_nvm_addrbits, eetype);
   2387 		}
   2388 	}
   2389 	wm_nvm_version(sc);
   2390 	aprint_verbose("\n");
   2391 
   2392 	/*
	 * XXX This is the first call of wm_gmii_setup_phytype; the result
	 * might be incorrect.
   2395 	 */
   2396 	wm_gmii_setup_phytype(sc, 0, 0);
   2397 
   2398 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2399 	switch (sc->sc_type) {
   2400 	case WM_T_ICH8:
   2401 	case WM_T_ICH9:
   2402 	case WM_T_ICH10:
   2403 	case WM_T_PCH:
   2404 	case WM_T_PCH2:
   2405 	case WM_T_PCH_LPT:
   2406 	case WM_T_PCH_SPT:
   2407 	case WM_T_PCH_CNP:
   2408 		apme_mask = WUC_APME;
   2409 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2410 		if ((eeprom_data & apme_mask) != 0)
   2411 			sc->sc_flags |= WM_F_WOL;
   2412 		break;
   2413 	default:
   2414 		break;
   2415 	}
   2416 
   2417 	/* Reset the chip to a known state. */
   2418 	wm_reset(sc);
   2419 
   2420 	/*
   2421 	 * Check for I21[01] PLL workaround.
   2422 	 *
   2423 	 * Three cases:
   2424 	 * a) Chip is I211.
   2425 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2426 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2427 	 */
   2428 	if (sc->sc_type == WM_T_I211)
   2429 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2430 	if (sc->sc_type == WM_T_I210) {
   2431 		if (!wm_nvm_flash_presence_i210(sc))
   2432 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 		else if ((sc->sc_nvm_ver_major < 3)
   2434 		    || ((sc->sc_nvm_ver_major == 3)
   2435 			&& (sc->sc_nvm_ver_minor < 25))) {
   2436 			aprint_verbose_dev(sc->sc_dev,
   2437 			    "ROM image version %d.%d is older than 3.25\n",
   2438 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2439 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2440 		}
   2441 	}
   2442 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2443 		wm_pll_workaround_i210(sc);
   2444 
   2445 	wm_get_wakeup(sc);
   2446 
   2447 	/* Non-AMT based hardware can now take control from firmware */
   2448 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2449 		wm_get_hw_control(sc);
   2450 
   2451 	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
   2454 	 */
   2455 	ea = prop_dictionary_get(dict, "mac-address");
   2456 	if (ea != NULL) {
   2457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2460 	} else {
   2461 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2462 			aprint_error_dev(sc->sc_dev,
   2463 			    "unable to read Ethernet address\n");
   2464 			goto out;
   2465 		}
   2466 	}
   2467 
   2468 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2469 	    ether_sprintf(enaddr));
   2470 
   2471 	/*
   2472 	 * Read the config info from the EEPROM, and set up various
   2473 	 * bits in the control registers based on their contents.
   2474 	 */
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2487 	if (pn != NULL) {
   2488 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2490 	} else {
   2491 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2492 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2493 			goto out;
   2494 		}
   2495 	}
   2496 
   2497 	/* check for WM_F_WOL */
   2498 	switch (sc->sc_type) {
   2499 	case WM_T_82542_2_0:
   2500 	case WM_T_82542_2_1:
   2501 	case WM_T_82543:
   2502 		/* dummy? */
   2503 		eeprom_data = 0;
   2504 		apme_mask = NVM_CFG3_APME;
   2505 		break;
   2506 	case WM_T_82544:
   2507 		apme_mask = NVM_CFG2_82544_APM_EN;
   2508 		eeprom_data = cfg2;
   2509 		break;
   2510 	case WM_T_82546:
   2511 	case WM_T_82546_3:
   2512 	case WM_T_82571:
   2513 	case WM_T_82572:
   2514 	case WM_T_82573:
   2515 	case WM_T_82574:
   2516 	case WM_T_82583:
   2517 	case WM_T_80003:
   2518 	case WM_T_82575:
   2519 	case WM_T_82576:
   2520 		apme_mask = NVM_CFG3_APME;
   2521 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2522 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2523 		break;
   2524 	case WM_T_82580:
   2525 	case WM_T_I350:
   2526 	case WM_T_I354:
   2527 	case WM_T_I210:
   2528 	case WM_T_I211:
   2529 		apme_mask = NVM_CFG3_APME;
   2530 		wm_nvm_read(sc,
   2531 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2532 		    1, &eeprom_data);
   2533 		break;
   2534 	case WM_T_ICH8:
   2535 	case WM_T_ICH9:
   2536 	case WM_T_ICH10:
   2537 	case WM_T_PCH:
   2538 	case WM_T_PCH2:
   2539 	case WM_T_PCH_LPT:
   2540 	case WM_T_PCH_SPT:
   2541 	case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
   2543 		apme_mask = eeprom_data = 0;
   2544 		break;
   2545 	default: /* XXX 82540 */
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2548 		break;
   2549 	}
   2550 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2551 	if ((eeprom_data & apme_mask) != 0)
   2552 		sc->sc_flags |= WM_F_WOL;
   2553 
   2554 	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake-on-LAN on a particular port.
   2558 	 */
   2559 	switch (sc->sc_pcidevid) {
   2560 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2561 		sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2564 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
		/* Wake events are only supported on port A for dual-fiber
		 * adapters, regardless of the EEPROM setting. */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2571 		/* if quad port adapter, disable WoL on all but port A */
   2572 		if (sc->sc_funcid != 0)
   2573 			sc->sc_flags &= ~WM_F_WOL;
   2574 		break;
   2575 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
		/* Wake events are only supported on port A for dual-fiber
		 * adapters, regardless of the EEPROM setting. */
   2578 		if (sc->sc_funcid == 1)
   2579 			sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2582 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2583 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2584 		/* if quad port adapter, disable WoL on all but port A */
   2585 		if (sc->sc_funcid != 0)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	}
   2589 
   2590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2591 		/* Check NVM for autonegotiation */
   2592 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2593 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2594 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2595 		}
   2596 	}
   2597 
   2598 	/*
	 * XXX need special handling for some multi-port cards
	 * to disable a particular port.
   2601 	 */
   2602 
   2603 	if (sc->sc_type >= WM_T_82544) {
   2604 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2605 		if (pn != NULL) {
   2606 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2607 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2608 		} else {
   2609 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2610 				aprint_error_dev(sc->sc_dev,
   2611 				    "unable to read SWDPIN\n");
   2612 				goto out;
   2613 			}
   2614 		}
   2615 	}
   2616 
   2617 	if (cfg1 & NVM_CFG1_ILOS)
   2618 		sc->sc_ctrl |= CTRL_ILOS;
   2619 
   2620 	/*
   2621 	 * XXX
	 * This code isn't correct, because pins 2 and 3 are located at
	 * different positions on newer chips. Check all the datasheets.
	 *
	 * Until this problem is resolved, only do this on chips <= 82580.
   2626 	 */
   2627 	if (sc->sc_type <= WM_T_82580) {
   2628 		if (sc->sc_type >= WM_T_82544) {
   2629 			sc->sc_ctrl |=
   2630 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2631 			    CTRL_SWDPIO_SHIFT;
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPINS_SHIFT;
   2635 		} else {
   2636 			sc->sc_ctrl |=
   2637 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2638 			    CTRL_SWDPIO_SHIFT;
   2639 		}
   2640 	}
   2641 
	/* XXX Should this also be done for chips other than the 82580? */
   2643 	if (sc->sc_type == WM_T_82580) {
   2644 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2645 		if (nvmword & __BIT(13))
   2646 			sc->sc_ctrl |= CTRL_ILOS;
   2647 	}
   2648 
   2649 #if 0
   2650 	if (sc->sc_type >= WM_T_82544) {
   2651 		if (cfg1 & NVM_CFG1_IPS0)
   2652 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2653 		if (cfg1 & NVM_CFG1_IPS1)
   2654 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2655 		sc->sc_ctrl_ext |=
   2656 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2657 		    CTRL_EXT_SWDPIO_SHIFT;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPINS_SHIFT;
   2661 	} else {
   2662 		sc->sc_ctrl_ext |=
   2663 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2664 		    CTRL_EXT_SWDPIO_SHIFT;
   2665 	}
   2666 #endif
   2667 
   2668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2669 #if 0
   2670 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2671 #endif
   2672 
   2673 	if (sc->sc_type == WM_T_PCH) {
   2674 		uint16_t val;
   2675 
   2676 		/* Save the NVM K1 bit setting */
   2677 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2678 
   2679 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2680 			sc->sc_nvm_k1_enabled = 1;
   2681 		else
   2682 			sc->sc_nvm_k1_enabled = 0;
   2683 	}
   2684 
   2685 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2686 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2687 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2688 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2689 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2690 	    || sc->sc_type == WM_T_82573
   2691 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2692 		/* Copper only */
   2693 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2698 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2699 		switch (link_mode) {
   2700 		case CTRL_EXT_LINK_MODE_1000KX:
   2701 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2702 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2703 			break;
   2704 		case CTRL_EXT_LINK_MODE_SGMII:
   2705 			if (wm_sgmii_uses_mdio(sc)) {
   2706 				aprint_verbose_dev(sc->sc_dev,
   2707 				    "SGMII(MDIO)\n");
   2708 				sc->sc_flags |= WM_F_SGMII;
   2709 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2710 				break;
   2711 			}
   2712 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2713 			/*FALLTHROUGH*/
   2714 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2715 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2716 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2717 				if (link_mode
   2718 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2719 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2720 					sc->sc_flags |= WM_F_SGMII;
   2721 				} else {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2723 					aprint_verbose_dev(sc->sc_dev,
   2724 					    "SERDES\n");
   2725 				}
   2726 				break;
   2727 			}
   2728 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2729 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2730 
   2731 			/* Change current link mode setting */
   2732 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2733 			switch (sc->sc_mediatype) {
   2734 			case WM_MEDIATYPE_COPPER:
   2735 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2736 				break;
   2737 			case WM_MEDIATYPE_SERDES:
   2738 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2739 				break;
   2740 			default:
   2741 				break;
   2742 			}
   2743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2744 			break;
   2745 		case CTRL_EXT_LINK_MODE_GMII:
   2746 		default:
   2747 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2748 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2749 			break;
   2750 		}
   2751 
    2752 		reg &= ~CTRL_EXT_I2C_ENA;
    2753 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2754 			reg |= CTRL_EXT_I2C_ENA;
   2757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2758 	} else if (sc->sc_type < WM_T_82543 ||
   2759 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2760 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 			aprint_error_dev(sc->sc_dev,
   2762 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2763 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2764 		}
   2765 	} else {
   2766 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2769 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2770 		}
   2771 	}
   2772 
   2773 	if (sc->sc_type >= WM_T_PCH2)
   2774 		sc->sc_flags |= WM_F_EEE;
   2775 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2776 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2777 		/* XXX: Need special handling for I354. (not yet) */
   2778 		if (sc->sc_type != WM_T_I354)
   2779 			sc->sc_flags |= WM_F_EEE;
   2780 	}
   2781 
   2782 	/* Set device properties (macflags) */
   2783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2784 
   2785 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2786 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
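	/*
	 * Illustrative note: snprintb(9) renders the flag bits using the
	 * "\020\001FLAG..." style format string in WM_FLAGS.  For example,
	 * with a hypothetical format "\020\001HAS_MII\002EEE" and flags 0x3,
	 * buf would read "0x3<HAS_MII,EEE>".
	 */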
   2787 
   2788 	/* Initialize the media structures accordingly. */
   2789 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2790 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2791 	else
   2792 		wm_tbi_mediainit(sc); /* All others */
   2793 
   2794 	ifp = &sc->sc_ethercom.ec_if;
   2795 	xname = device_xname(sc->sc_dev);
   2796 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2797 	ifp->if_softc = sc;
   2798 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2799 #ifdef WM_MPSAFE
   2800 	ifp->if_extflags = IFEF_MPSAFE;
   2801 #endif
   2802 	ifp->if_ioctl = wm_ioctl;
   2803 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2804 		ifp->if_start = wm_nq_start;
    2805 		/*
    2806 		 * When there is only one CPU and the controller can use
    2807 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2808 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2809 		 * other for link status changes.
    2810 		 * In this situation, wm_nq_transmit() is disadvantageous
    2811 		 * because of wm_select_txqueue() and pcq(9) overhead.
    2812 		 */
   2813 		if (wm_is_using_multiqueue(sc))
   2814 			ifp->if_transmit = wm_nq_transmit;
   2815 	} else {
   2816 		ifp->if_start = wm_start;
   2817 		/*
    2818 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2819 		 */
   2820 		if (wm_is_using_multiqueue(sc))
   2821 			ifp->if_transmit = wm_transmit;
   2822 	}
    2823 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2824 	ifp->if_init = wm_init;
   2825 	ifp->if_stop = wm_stop;
   2826 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2827 	IFQ_SET_READY(&ifp->if_snd);
   2828 
   2829 	/* Check for jumbo frame */
   2830 	switch (sc->sc_type) {
   2831 	case WM_T_82573:
   2832 		/* XXX limited to 9234 if ASPM is disabled */
   2833 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2834 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2835 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_82571:
   2838 	case WM_T_82572:
   2839 	case WM_T_82574:
   2840 	case WM_T_82583:
   2841 	case WM_T_82575:
   2842 	case WM_T_82576:
   2843 	case WM_T_82580:
   2844 	case WM_T_I350:
   2845 	case WM_T_I354:
   2846 	case WM_T_I210:
   2847 	case WM_T_I211:
   2848 	case WM_T_80003:
   2849 	case WM_T_ICH9:
   2850 	case WM_T_ICH10:
   2851 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2852 	case WM_T_PCH_LPT:
   2853 	case WM_T_PCH_SPT:
   2854 	case WM_T_PCH_CNP:
   2855 		/* XXX limited to 9234 */
   2856 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2857 		break;
   2858 	case WM_T_PCH:
   2859 		/* XXX limited to 4096 */
   2860 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2861 		break;
   2862 	case WM_T_82542_2_0:
   2863 	case WM_T_82542_2_1:
   2864 	case WM_T_ICH8:
   2865 		/* No support for jumbo frame */
   2866 		break;
   2867 	default:
   2868 		/* ETHER_MAX_LEN_JUMBO */
   2869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2870 		break;
   2871 	}
   2872 
    2873 	/* If we're an i82543 or greater, we can support VLANs. */
   2874 	if (sc->sc_type >= WM_T_82543)
   2875 		sc->sc_ethercom.ec_capabilities |=
   2876 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2877 
   2878 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2880 
   2881 	/*
    2882 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2883 	 * on i82543 and later.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82543) {
   2886 		ifp->if_capabilities |=
   2887 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2888 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2889 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2890 		    IFCAP_CSUM_TCPv6_Tx |
   2891 		    IFCAP_CSUM_UDPv6_Tx;
   2892 	}
   2893 
   2894 	/*
    2895 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2896 	 *
   2897 	 *	82541GI (8086:1076) ... no
   2898 	 *	82572EI (8086:10b9) ... yes
   2899 	 */
   2900 	if (sc->sc_type >= WM_T_82571) {
   2901 		ifp->if_capabilities |=
   2902 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2903 	}
   2904 
   2905 	/*
    2906 	 * If we're an i82544 or greater (except i82547), we can do
   2907 	 * TCP segmentation offload.
   2908 	 */
   2909 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2910 		ifp->if_capabilities |= IFCAP_TSOv4;
   2911 	}
   2912 
   2913 	if (sc->sc_type >= WM_T_82571) {
   2914 		ifp->if_capabilities |= IFCAP_TSOv6;
   2915 	}
   2916 
   2917 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2918 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2919 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2920 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2921 
   2922 #ifdef WM_MPSAFE
   2923 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2924 #else
   2925 	sc->sc_core_lock = NULL;
   2926 #endif
   2927 
   2928 	/* Attach the interface. */
   2929 	error = if_initialize(ifp);
   2930 	if (error != 0) {
   2931 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2932 		    error);
   2933 		return; /* Error */
   2934 	}
   2935 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2936 	ether_ifattach(ifp, enaddr);
   2937 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2938 	if_register(ifp);
   2939 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2940 	    RND_FLAG_DEFAULT);
   2941 
   2942 #ifdef WM_EVENT_COUNTERS
   2943 	/* Attach event counters. */
   2944 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2945 	    NULL, xname, "linkintr");
   2946 
   2947 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2948 	    NULL, xname, "tx_xoff");
   2949 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2950 	    NULL, xname, "tx_xon");
   2951 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2952 	    NULL, xname, "rx_xoff");
   2953 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2954 	    NULL, xname, "rx_xon");
   2955 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2956 	    NULL, xname, "rx_macctl");
   2957 #endif /* WM_EVENT_COUNTERS */
   2958 
   2959 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2960 		pmf_class_network_register(self, ifp);
   2961 	else
   2962 		aprint_error_dev(self, "couldn't establish power handler\n");
   2963 
   2964 	sc->sc_flags |= WM_F_ATTACHED;
   2965 out:
   2966 	return;
   2967 }
   2968 
   2969 /* The detach function (ca_detach) */
   2970 static int
   2971 wm_detach(device_t self, int flags __unused)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2975 	int i;
   2976 
   2977 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2978 		return 0;
   2979 
   2980 	/* Stop the interface. Callouts are stopped in it. */
   2981 	wm_stop(ifp, 1);
   2982 
   2983 	pmf_device_deregister(self);
   2984 
   2985 #ifdef WM_EVENT_COUNTERS
   2986 	evcnt_detach(&sc->sc_ev_linkintr);
   2987 
   2988 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2989 	evcnt_detach(&sc->sc_ev_tx_xon);
   2990 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2991 	evcnt_detach(&sc->sc_ev_rx_xon);
   2992 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2993 #endif /* WM_EVENT_COUNTERS */
   2994 
   2995 	/* Tell the firmware about the release */
   2996 	WM_CORE_LOCK(sc);
   2997 	wm_release_manageability(sc);
   2998 	wm_release_hw_control(sc);
   2999 	wm_enable_wakeup(sc);
   3000 	WM_CORE_UNLOCK(sc);
   3001 
   3002 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3003 
   3004 	/* Delete all remaining media. */
   3005 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3006 
   3007 	ether_ifdetach(ifp);
   3008 	if_detach(ifp);
   3009 	if_percpuq_destroy(sc->sc_ipq);
   3010 
   3011 	/* Unload RX dmamaps and free mbufs */
   3012 	for (i = 0; i < sc->sc_nqueues; i++) {
   3013 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3014 		mutex_enter(rxq->rxq_lock);
   3015 		wm_rxdrain(rxq);
   3016 		mutex_exit(rxq->rxq_lock);
   3017 	}
   3018 	/* Must unlock here */
   3019 
   3020 	/* Disestablish the interrupt handler */
   3021 	for (i = 0; i < sc->sc_nintrs; i++) {
   3022 		if (sc->sc_ihs[i] != NULL) {
   3023 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3024 			sc->sc_ihs[i] = NULL;
   3025 		}
   3026 	}
   3027 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3028 
   3029 	wm_free_txrx_queues(sc);
   3030 
   3031 	/* Unmap the registers */
   3032 	if (sc->sc_ss) {
   3033 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3034 		sc->sc_ss = 0;
   3035 	}
   3036 	if (sc->sc_ios) {
   3037 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3038 		sc->sc_ios = 0;
   3039 	}
   3040 	if (sc->sc_flashs) {
   3041 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3042 		sc->sc_flashs = 0;
   3043 	}
   3044 
   3045 	if (sc->sc_core_lock)
   3046 		mutex_obj_free(sc->sc_core_lock);
   3047 	if (sc->sc_ich_phymtx)
   3048 		mutex_obj_free(sc->sc_ich_phymtx);
   3049 	if (sc->sc_ich_nvmmtx)
   3050 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3051 
   3052 	return 0;
   3053 }
   3054 
   3055 static bool
   3056 wm_suspend(device_t self, const pmf_qual_t *qual)
   3057 {
   3058 	struct wm_softc *sc = device_private(self);
   3059 
   3060 	wm_release_manageability(sc);
   3061 	wm_release_hw_control(sc);
   3062 	wm_enable_wakeup(sc);
   3063 
   3064 	return true;
   3065 }
   3066 
   3067 static bool
   3068 wm_resume(device_t self, const pmf_qual_t *qual)
   3069 {
   3070 	struct wm_softc *sc = device_private(self);
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	pcireg_t reg;
   3073 	char buf[256];
   3074 
   3075 	reg = CSR_READ(sc, WMREG_WUS);
   3076 	if (reg != 0) {
   3077 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3078 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3079 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3080 	}
   3081 
   3082 	if (sc->sc_type >= WM_T_PCH2)
   3083 		wm_resume_workarounds_pchlan(sc);
   3084 	if ((ifp->if_flags & IFF_UP) == 0) {
   3085 		wm_reset(sc);
   3086 		/* Non-AMT based hardware can now take control from firmware */
   3087 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3088 			wm_get_hw_control(sc);
   3089 		wm_init_manageability(sc);
   3090 	} else {
   3091 		/*
   3092 		 * We called pmf_class_network_register(), so if_init() is
    3093 		 * automatically called when the interface is IFF_UP. wm_reset(),
   3094 		 * wm_get_hw_control() and wm_init_manageability() are called
   3095 		 * via wm_init().
   3096 		 */
   3097 	}
   3098 
   3099 	return true;
   3100 }
   3101 
   3102 /*
   3103  * wm_watchdog:		[ifnet interface function]
   3104  *
   3105  *	Watchdog timer handler.
   3106  */
   3107 static void
   3108 wm_watchdog(struct ifnet *ifp)
   3109 {
   3110 	int qid;
   3111 	struct wm_softc *sc = ifp->if_softc;
    3112 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 16 (82576). */
   3113 
   3114 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3115 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3116 
   3117 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3118 	}
   3119 
    3120 	/* If any of the queues hung up, reset the interface. */
   3123 	if (hang_queue != 0) {
   3124 		(void) wm_init(ifp);
   3125 
    3126 		/*
    3127 		 * Some upper-layer processing still calls ifp->if_start()
    3128 		 * directly, e.g. ALTQ or a single-CPU system.
    3129 		 */
   3130 		/* Try to get more packets going. */
   3131 		ifp->if_start(ifp);
   3132 	}
   3133 }
   3134 
   3135 
   3136 static void
   3137 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3138 {
   3139 
   3140 	mutex_enter(txq->txq_lock);
   3141 	if (txq->txq_sending &&
   3142 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3143 		wm_watchdog_txq_locked(ifp, txq, hang);
   3144 	}
   3145 	mutex_exit(txq->txq_lock);
   3146 }
   3147 
   3148 static void
   3149 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3150     uint16_t *hang)
   3151 {
   3152 	struct wm_softc *sc = ifp->if_softc;
   3153 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3154 
   3155 	KASSERT(mutex_owned(txq->txq_lock));
   3156 
   3157 	/*
   3158 	 * Since we're using delayed interrupts, sweep up
   3159 	 * before we report an error.
   3160 	 */
   3161 	wm_txeof(txq, UINT_MAX);
   3162 
   3163 	if (txq->txq_sending)
   3164 		*hang |= __BIT(wmq->wmq_id);
   3165 
   3166 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3167 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3168 		    device_xname(sc->sc_dev));
   3169 	} else {
   3170 #ifdef WM_DEBUG
   3171 		int i, j;
   3172 		struct wm_txsoft *txs;
   3173 #endif
   3174 		log(LOG_ERR,
   3175 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3176 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3177 		    txq->txq_next);
   3178 		ifp->if_oerrors++;
   3179 #ifdef WM_DEBUG
   3180 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3181 		    i = WM_NEXTTXS(txq, i)) {
   3182 		    txs = &txq->txq_soft[i];
   3183 		    printf("txs %d tx %d -> %d\n",
   3184 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3185 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3186 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3187 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3188 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3189 				    printf("\t %#08x%08x\n",
   3190 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3191 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3192 			    } else {
   3193 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3194 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3195 					txq->txq_descs[j].wtx_addr.wa_low);
   3196 				    printf("\t %#04x%02x%02x%08x\n",
   3197 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3198 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3199 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3200 					txq->txq_descs[j].wtx_cmdlen);
   3201 			    }
   3202 			if (j == txs->txs_lastdesc)
   3203 				break;
   3204 			}
   3205 		}
   3206 #endif
   3207 	}
   3208 }
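/*
 * Worked example (illustrative): each hung queue sets its own bit in
 * *hang, so if queues 0 and 2 both time out, *hang becomes
 * __BIT(0) | __BIT(2) == 0x5 and wm_watchdog() resets the interface.
 */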
   3209 
   3210 /*
   3211  * wm_tick:
   3212  *
   3213  *	One second timer, used to check link status, sweep up
   3214  *	completed transmit jobs, etc.
   3215  */
   3216 static void
   3217 wm_tick(void *arg)
   3218 {
   3219 	struct wm_softc *sc = arg;
   3220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3221 #ifndef WM_MPSAFE
   3222 	int s = splnet();
   3223 #endif
   3224 
   3225 	WM_CORE_LOCK(sc);
   3226 
   3227 	if (sc->sc_core_stopping) {
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifndef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		return;
   3233 	}
   3234 
   3235 	if (sc->sc_type >= WM_T_82542_2_1) {
   3236 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3237 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3238 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3239 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3241 	}
   3242 
   3243 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3244 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3245 	    + CSR_READ(sc, WMREG_CRCERRS)
   3246 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3247 	    + CSR_READ(sc, WMREG_SYMERRC)
   3248 	    + CSR_READ(sc, WMREG_RXERRC)
   3249 	    + CSR_READ(sc, WMREG_SEC)
   3250 	    + CSR_READ(sc, WMREG_CEXTERR)
   3251 	    + CSR_READ(sc, WMREG_RLEC);
    3252 	/*
    3253 	 * WMREG_RNBC is incremented when there are no available buffers
    3254 	 * in host memory. It does not count dropped packets, because the
    3255 	 * ethernet controller can still receive packets in that case if
    3256 	 * there is space in the PHY's FIFO.
    3257 	 *
    3258 	 * If you want to track the WMREG_RNBC count, use a dedicated
    3259 	 * EVCNT of your own instead of if_iqdrops.
    3260 	 */
   3261 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
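#if 0
	/*
	 * Minimal sketch of the suggestion above (not compiled):
	 * sc_ev_rx_nobuf is a hypothetical evcnt that would have to be
	 * attached in wm_attach() like the other event counters.
	 */
	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
#endif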
   3262 
   3263 	if (sc->sc_flags & WM_F_HAS_MII)
   3264 		mii_tick(&sc->sc_mii);
   3265 	else if ((sc->sc_type >= WM_T_82575)
   3266 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3267 		wm_serdes_tick(sc);
   3268 	else
   3269 		wm_tbi_tick(sc);
   3270 
   3271 	WM_CORE_UNLOCK(sc);
   3272 
   3273 	wm_watchdog(ifp);
   3274 
   3275 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3276 }
   3277 
   3278 static int
   3279 wm_ifflags_cb(struct ethercom *ec)
   3280 {
   3281 	struct ifnet *ifp = &ec->ec_if;
   3282 	struct wm_softc *sc = ifp->if_softc;
   3283 	int iffchange, ecchange;
   3284 	bool needreset = false;
   3285 	int rc = 0;
   3286 
   3287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3288 		device_xname(sc->sc_dev), __func__));
   3289 
   3290 	WM_CORE_LOCK(sc);
   3291 
   3292 	/*
    3293 	 * Check for changes in if_flags.
    3294 	 * The main purpose is to avoid a link reset when opening bpf.
   3295 	 */
   3296 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3297 	sc->sc_if_flags = ifp->if_flags;
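	/*
	 * Example (illustrative values): if the old flags were
	 * IFF_UP|IFF_RUNNING and the new flags are IFF_UP|IFF_PROMISC,
	 * the XOR yields IFF_RUNNING|IFF_PROMISC, i.e. exactly the bits
	 * that changed in either direction.
	 */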
   3298 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3299 		needreset = true;
   3300 		goto ec;
   3301 	}
   3302 
   3303 	/* iff related updates */
   3304 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3305 		wm_set_filter(sc);
   3306 
   3307 	wm_set_vlan(sc);
   3308 
   3309 ec:
   3310 	/* Check for ec_capenable. */
   3311 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3312 	sc->sc_ec_capenable = ec->ec_capenable;
   3313 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3314 		needreset = true;
   3315 		goto out;
   3316 	}
   3317 
   3318 	/* ec related updates */
   3319 	wm_set_eee(sc);
   3320 
   3321 out:
   3322 	if (needreset)
   3323 		rc = ENETRESET;
   3324 	WM_CORE_UNLOCK(sc);
   3325 
   3326 	return rc;
   3327 }
   3328 
   3329 /*
   3330  * wm_ioctl:		[ifnet interface function]
   3331  *
   3332  *	Handle control requests from the operator.
   3333  */
   3334 static int
   3335 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3336 {
   3337 	struct wm_softc *sc = ifp->if_softc;
   3338 	struct ifreq *ifr = (struct ifreq *) data;
   3339 	struct ifaddr *ifa = (struct ifaddr *)data;
   3340 	struct sockaddr_dl *sdl;
   3341 	int s, error;
   3342 
   3343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3344 		device_xname(sc->sc_dev), __func__));
   3345 
   3346 #ifndef WM_MPSAFE
   3347 	s = splnet();
   3348 #endif
   3349 	switch (cmd) {
   3350 	case SIOCSIFMEDIA:
   3351 	case SIOCGIFMEDIA:
   3352 		WM_CORE_LOCK(sc);
   3353 		/* Flow control requires full-duplex mode. */
   3354 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3355 		    (ifr->ifr_media & IFM_FDX) == 0)
   3356 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3358 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3359 				/* We can do both TXPAUSE and RXPAUSE. */
   3360 				ifr->ifr_media |=
   3361 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3362 			}
   3363 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3364 		}
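		/*
		 * Example (illustrative): a request for IFM_1000_T|IFM_FDX
		 * with IFM_FLOW set is expanded to IFM_ETH_TXPAUSE and
		 * IFM_ETH_RXPAUSE above, while an IFM_AUTO or half-duplex
		 * request has its flow control bits cleared first.
		 */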
   3365 		WM_CORE_UNLOCK(sc);
   3366 #ifdef WM_MPSAFE
   3367 		s = splnet();
   3368 #endif
   3369 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3370 #ifdef WM_MPSAFE
   3371 		splx(s);
   3372 #endif
   3373 		break;
   3374 	case SIOCINITIFADDR:
   3375 		WM_CORE_LOCK(sc);
   3376 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3377 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3378 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3379 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    3380 			/* Unicast address is the first entry of the receive filter */
   3381 			wm_set_filter(sc);
   3382 			error = 0;
   3383 			WM_CORE_UNLOCK(sc);
   3384 			break;
   3385 		}
   3386 		WM_CORE_UNLOCK(sc);
   3387 		/*FALLTHROUGH*/
   3388 	default:
   3389 #ifdef WM_MPSAFE
   3390 		s = splnet();
   3391 #endif
   3392 		/* It may call wm_start, so unlock here */
   3393 		error = ether_ioctl(ifp, cmd, data);
   3394 #ifdef WM_MPSAFE
   3395 		splx(s);
   3396 #endif
   3397 		if (error != ENETRESET)
   3398 			break;
   3399 
   3400 		error = 0;
   3401 
   3402 		if (cmd == SIOCSIFCAP)
   3403 			error = (*ifp->if_init)(ifp);
   3404 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3405 			;
   3406 		else if (ifp->if_flags & IFF_RUNNING) {
   3407 			/*
   3408 			 * Multicast list has changed; set the hardware filter
   3409 			 * accordingly.
   3410 			 */
   3411 			WM_CORE_LOCK(sc);
   3412 			wm_set_filter(sc);
   3413 			WM_CORE_UNLOCK(sc);
   3414 		}
   3415 		break;
   3416 	}
   3417 
   3418 #ifndef WM_MPSAFE
   3419 	splx(s);
   3420 #endif
   3421 	return error;
   3422 }
   3423 
   3424 /* MAC address related */
   3425 
   3426 /*
    3427  * Get the offset of the MAC address and return it.
    3428  * If an error occurred, use offset 0.
   3429  */
   3430 static uint16_t
   3431 wm_check_alt_mac_addr(struct wm_softc *sc)
   3432 {
   3433 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3434 	uint16_t offset = NVM_OFF_MACADDR;
   3435 
   3436 	/* Try to read alternative MAC address pointer */
   3437 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3438 		return 0;
   3439 
    3440 	/* Check whether the pointer is valid. */
   3441 	if ((offset == 0x0000) || (offset == 0xffff))
   3442 		return 0;
   3443 
   3444 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3445 	/*
    3446 	 * Check whether the alternative MAC address is valid.
    3447 	 * Some cards have a non-0xffff pointer but don't actually
    3448 	 * use an alternative MAC address.
    3449 	 *
    3450 	 * Check that the multicast (group) bit of the address is clear.
    3451 	 */
   3452 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3453 		if (((myea[0] & 0xff) & 0x01) == 0)
   3454 			return offset; /* Found */
   3455 
   3456 	/* Not found */
   3457 	return 0;
   3458 }
   3459 
   3460 static int
   3461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3462 {
   3463 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3464 	uint16_t offset = NVM_OFF_MACADDR;
   3465 	int do_invert = 0;
   3466 
   3467 	switch (sc->sc_type) {
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 		/* EEPROM Top Level Partitioning */
   3472 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3473 		break;
   3474 	case WM_T_82571:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_80003:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		offset = wm_check_alt_mac_addr(sc);
   3481 		if (offset == 0)
   3482 			if ((sc->sc_funcid & 0x01) == 1)
   3483 				do_invert = 1;
   3484 		break;
   3485 	default:
   3486 		if ((sc->sc_funcid & 0x01) == 1)
   3487 			do_invert = 1;
   3488 		break;
   3489 	}
   3490 
   3491 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3492 		goto bad;
   3493 
   3494 	enaddr[0] = myea[0] & 0xff;
   3495 	enaddr[1] = myea[0] >> 8;
   3496 	enaddr[2] = myea[1] & 0xff;
   3497 	enaddr[3] = myea[1] >> 8;
   3498 	enaddr[4] = myea[2] & 0xff;
   3499 	enaddr[5] = myea[2] >> 8;
   3500 
   3501 	/*
   3502 	 * Toggle the LSB of the MAC address on the second port
   3503 	 * of some dual port cards.
   3504 	 */
   3505 	if (do_invert != 0)
   3506 		enaddr[5] ^= 1;
   3507 
   3508 	return 0;
   3509 
   3510  bad:
   3511 	return -1;
   3512 }
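/*
 * Worked example (illustrative values): NVM words {0x1100, 0x3322,
 * 0x5544} unpack low byte first to the MAC address 00:11:22:33:44:55.
 * With do_invert set (second port of some dual-port cards), the last
 * octet would become 0x54.
 */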
   3513 
   3514 /*
   3515  * wm_set_ral:
   3516  *
    3517  *	Set an entry in the receive address list.
   3518  */
   3519 static void
   3520 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3521 {
   3522 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3523 	uint32_t wlock_mac;
   3524 	int rv;
   3525 
   3526 	if (enaddr != NULL) {
   3527 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3528 		    (enaddr[3] << 24);
   3529 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3530 		ral_hi |= RAL_AV;
   3531 	} else {
   3532 		ral_lo = 0;
   3533 		ral_hi = 0;
   3534 	}
   3535 
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82542_2_0:
   3538 	case WM_T_82542_2_1:
   3539 	case WM_T_82543:
   3540 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3541 		CSR_WRITE_FLUSH(sc);
   3542 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3543 		CSR_WRITE_FLUSH(sc);
   3544 		break;
   3545 	case WM_T_PCH2:
   3546 	case WM_T_PCH_LPT:
   3547 	case WM_T_PCH_SPT:
   3548 	case WM_T_PCH_CNP:
   3549 		if (idx == 0) {
   3550 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3551 			CSR_WRITE_FLUSH(sc);
   3552 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3553 			CSR_WRITE_FLUSH(sc);
   3554 			return;
   3555 		}
   3556 		if (sc->sc_type != WM_T_PCH2) {
   3557 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3558 			    FWSM_WLOCK_MAC);
   3559 			addrl = WMREG_SHRAL(idx - 1);
   3560 			addrh = WMREG_SHRAH(idx - 1);
   3561 		} else {
   3562 			wlock_mac = 0;
   3563 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3564 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3565 		}
   3566 
   3567 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3568 			rv = wm_get_swflag_ich8lan(sc);
   3569 			if (rv != 0)
   3570 				return;
   3571 			CSR_WRITE(sc, addrl, ral_lo);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			CSR_WRITE(sc, addrh, ral_hi);
   3574 			CSR_WRITE_FLUSH(sc);
   3575 			wm_put_swflag_ich8lan(sc);
   3576 		}
   3577 
   3578 		break;
   3579 	default:
   3580 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3581 		CSR_WRITE_FLUSH(sc);
   3582 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3583 		CSR_WRITE_FLUSH(sc);
   3584 		break;
   3585 	}
   3586 }
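/*
 * Illustrative packing: for the address 00:11:22:33:44:55,
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV; the
 * address-valid bit is what arms the entry.
 */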
   3587 
   3588 /*
   3589  * wm_mchash:
   3590  *
   3591  *	Compute the hash of the multicast address for the 4096-bit
   3592  *	multicast filter.
   3593  */
   3594 static uint32_t
   3595 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3596 {
   3597 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3598 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3599 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3600 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3601 	uint32_t hash;
   3602 
   3603 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3604 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3605 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3606 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3607 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3608 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3609 		return (hash & 0x3ff);
   3610 	}
   3611 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3612 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3613 
   3614 	return (hash & 0xfff);
   3615 }
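/*
 * Worked example (illustrative): for the multicast address
 * 01:00:5e:00:00:01 on a non-ICH/PCH chip with sc_mchash_type 0,
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010.  wm_set_filter() then
 * sets bit (0x10 & 0x1f) = 16 of MTA word (0x10 >> 5) = 0.
 */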
   3616 
    3617 /*
    3618  * wm_rar_count:
    3619  *	Return the number of entries in the receive address list.
    3620  */
   3621 static int
   3622 wm_rar_count(struct wm_softc *sc)
   3623 {
   3624 	int size;
   3625 
   3626 	switch (sc->sc_type) {
   3627 	case WM_T_ICH8:
    3628 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3629 		break;
   3630 	case WM_T_ICH9:
   3631 	case WM_T_ICH10:
   3632 	case WM_T_PCH:
   3633 		size = WM_RAL_TABSIZE_ICH8;
   3634 		break;
   3635 	case WM_T_PCH2:
   3636 		size = WM_RAL_TABSIZE_PCH2;
   3637 		break;
   3638 	case WM_T_PCH_LPT:
   3639 	case WM_T_PCH_SPT:
   3640 	case WM_T_PCH_CNP:
   3641 		size = WM_RAL_TABSIZE_PCH_LPT;
   3642 		break;
   3643 	case WM_T_82575:
   3644 		size = WM_RAL_TABSIZE_82575;
   3645 		break;
   3646 	case WM_T_82576:
   3647 	case WM_T_82580:
   3648 		size = WM_RAL_TABSIZE_82576;
   3649 		break;
   3650 	case WM_T_I350:
   3651 	case WM_T_I354:
   3652 		size = WM_RAL_TABSIZE_I350;
   3653 		break;
   3654 	default:
   3655 		size = WM_RAL_TABSIZE;
   3656 	}
   3657 
   3658 	return size;
   3659 }
   3660 
   3661 /*
   3662  * wm_set_filter:
   3663  *
   3664  *	Set up the receive filter.
   3665  */
   3666 static void
   3667 wm_set_filter(struct wm_softc *sc)
   3668 {
   3669 	struct ethercom *ec = &sc->sc_ethercom;
   3670 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3671 	struct ether_multi *enm;
   3672 	struct ether_multistep step;
   3673 	bus_addr_t mta_reg;
   3674 	uint32_t hash, reg, bit;
   3675 	int i, size, ralmax;
   3676 
   3677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3678 		device_xname(sc->sc_dev), __func__));
   3679 
   3680 	if (sc->sc_type >= WM_T_82544)
   3681 		mta_reg = WMREG_CORDOVA_MTA;
   3682 	else
   3683 		mta_reg = WMREG_MTA;
   3684 
   3685 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3686 
   3687 	if (ifp->if_flags & IFF_BROADCAST)
   3688 		sc->sc_rctl |= RCTL_BAM;
   3689 	if (ifp->if_flags & IFF_PROMISC) {
   3690 		sc->sc_rctl |= RCTL_UPE;
   3691 		goto allmulti;
   3692 	}
   3693 
   3694 	/*
   3695 	 * Set the station address in the first RAL slot, and
   3696 	 * clear the remaining slots.
   3697 	 */
   3698 	size = wm_rar_count(sc);
   3699 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3700 
   3701 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3702 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3703 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3704 		switch (i) {
   3705 		case 0:
   3706 			/* We can use all entries */
   3707 			ralmax = size;
   3708 			break;
   3709 		case 1:
   3710 			/* Only RAR[0] */
   3711 			ralmax = 1;
   3712 			break;
   3713 		default:
   3714 			/* available SHRA + RAR[0] */
   3715 			ralmax = i + 1;
   3716 		}
   3717 	} else
   3718 		ralmax = size;
   3719 	for (i = 1; i < size; i++) {
   3720 		if (i < ralmax)
   3721 			wm_set_ral(sc, NULL, i);
   3722 	}
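	/*
	 * Example (illustrative): if FWSM_WLOCK_MAC reads 3 on a PCH_LPT,
	 * RAR[0] plus SHRA[0..2] are usable, so ralmax = 4 and only slots
	 * 1-3 are cleared above; locked slots are left untouched.
	 */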
   3723 
   3724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3725 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3726 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3727 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3728 		size = WM_ICH8_MC_TABSIZE;
   3729 	else
   3730 		size = WM_MC_TABSIZE;
   3731 	/* Clear out the multicast table. */
   3732 	for (i = 0; i < size; i++) {
   3733 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3734 		CSR_WRITE_FLUSH(sc);
   3735 	}
   3736 
   3737 	ETHER_LOCK(ec);
   3738 	ETHER_FIRST_MULTI(step, ec, enm);
   3739 	while (enm != NULL) {
   3740 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3741 			ETHER_UNLOCK(ec);
   3742 			/*
   3743 			 * We must listen to a range of multicast addresses.
   3744 			 * For now, just accept all multicasts, rather than
   3745 			 * trying to set only those filter bits needed to match
   3746 			 * the range.  (At this time, the only use of address
   3747 			 * ranges is for IP multicast routing, for which the
   3748 			 * range is big enough to require all bits set.)
   3749 			 */
   3750 			goto allmulti;
   3751 		}
   3752 
   3753 		hash = wm_mchash(sc, enm->enm_addrlo);
   3754 
   3755 		reg = (hash >> 5);
   3756 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3757 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3758 		    || (sc->sc_type == WM_T_PCH2)
   3759 		    || (sc->sc_type == WM_T_PCH_LPT)
   3760 		    || (sc->sc_type == WM_T_PCH_SPT)
   3761 		    || (sc->sc_type == WM_T_PCH_CNP))
   3762 			reg &= 0x1f;
   3763 		else
   3764 			reg &= 0x7f;
   3765 		bit = hash & 0x1f;
   3766 
   3767 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3768 		hash |= 1U << bit;
   3769 
   3770 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3771 			/*
    3772 			 * 82544 Errata 9: Certain registers cannot be written
   3773 			 * with particular alignments in PCI-X bus operation
   3774 			 * (FCAH, MTA and VFTA).
   3775 			 */
   3776 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3777 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3778 			CSR_WRITE_FLUSH(sc);
   3779 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 		} else {
   3782 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3783 			CSR_WRITE_FLUSH(sc);
   3784 		}
   3785 
   3786 		ETHER_NEXT_MULTI(step, enm);
   3787 	}
   3788 	ETHER_UNLOCK(ec);
   3789 
   3790 	ifp->if_flags &= ~IFF_ALLMULTI;
   3791 	goto setit;
   3792 
   3793  allmulti:
   3794 	ifp->if_flags |= IFF_ALLMULTI;
   3795 	sc->sc_rctl |= RCTL_MPE;
   3796 
   3797  setit:
   3798 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3799 }
   3800 
   3801 /* Reset and init related */
   3802 
   3803 static void
   3804 wm_set_vlan(struct wm_softc *sc)
   3805 {
   3806 
   3807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3808 		device_xname(sc->sc_dev), __func__));
   3809 
   3810 	/* Deal with VLAN enables. */
   3811 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3812 		sc->sc_ctrl |= CTRL_VME;
   3813 	else
   3814 		sc->sc_ctrl &= ~CTRL_VME;
   3815 
   3816 	/* Write the control registers. */
   3817 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3818 }
   3819 
   3820 static void
   3821 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3822 {
   3823 	uint32_t gcr;
   3824 	pcireg_t ctrl2;
   3825 
   3826 	gcr = CSR_READ(sc, WMREG_GCR);
   3827 
   3828 	/* Only take action if timeout value is defaulted to 0 */
   3829 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3830 		goto out;
   3831 
   3832 	if ((gcr & GCR_CAP_VER2) == 0) {
   3833 		gcr |= GCR_CMPL_TMOUT_10MS;
   3834 		goto out;
   3835 	}
   3836 
   3837 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3838 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3839 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3840 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3841 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3842 
   3843 out:
   3844 	/* Disable completion timeout resend */
   3845 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3846 
   3847 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3848 }
   3849 
   3850 void
   3851 wm_get_auto_rd_done(struct wm_softc *sc)
   3852 {
   3853 	int i;
   3854 
    3855 	/* Wait for eeprom to reload */
   3856 	switch (sc->sc_type) {
   3857 	case WM_T_82571:
   3858 	case WM_T_82572:
   3859 	case WM_T_82573:
   3860 	case WM_T_82574:
   3861 	case WM_T_82583:
   3862 	case WM_T_82575:
   3863 	case WM_T_82576:
   3864 	case WM_T_82580:
   3865 	case WM_T_I350:
   3866 	case WM_T_I354:
   3867 	case WM_T_I210:
   3868 	case WM_T_I211:
   3869 	case WM_T_80003:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		for (i = 0; i < 10; i++) {
   3873 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3874 				break;
   3875 			delay(1000);
   3876 		}
   3877 		if (i == 10) {
   3878 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3879 			    "complete\n", device_xname(sc->sc_dev));
   3880 		}
   3881 		break;
   3882 	default:
   3883 		break;
   3884 	}
   3885 }
   3886 
   3887 void
   3888 wm_lan_init_done(struct wm_softc *sc)
   3889 {
   3890 	uint32_t reg = 0;
   3891 	int i;
   3892 
   3893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3894 		device_xname(sc->sc_dev), __func__));
   3895 
   3896 	/* Wait for eeprom to reload */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_ICH10:
   3899 	case WM_T_PCH:
   3900 	case WM_T_PCH2:
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3905 			reg = CSR_READ(sc, WMREG_STATUS);
   3906 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3907 				break;
   3908 			delay(100);
   3909 		}
   3910 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3911 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3912 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3913 		}
   3914 		break;
   3915 	default:
   3916 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3917 		    __func__);
   3918 		break;
   3919 	}
   3920 
   3921 	reg &= ~STATUS_LAN_INIT_DONE;
   3922 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3923 }
   3924 
   3925 void
   3926 wm_get_cfg_done(struct wm_softc *sc)
   3927 {
   3928 	int mask;
   3929 	uint32_t reg;
   3930 	int i;
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	/* Wait for eeprom to reload */
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_82542_2_0:
   3938 	case WM_T_82542_2_1:
   3939 		/* null */
   3940 		break;
   3941 	case WM_T_82543:
   3942 	case WM_T_82544:
   3943 	case WM_T_82540:
   3944 	case WM_T_82545:
   3945 	case WM_T_82545_3:
   3946 	case WM_T_82546:
   3947 	case WM_T_82546_3:
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 	case WM_T_82573:
   3953 	case WM_T_82574:
   3954 	case WM_T_82583:
   3955 		/* generic */
   3956 		delay(10*1000);
   3957 		break;
   3958 	case WM_T_80003:
   3959 	case WM_T_82571:
   3960 	case WM_T_82572:
   3961 	case WM_T_82575:
   3962 	case WM_T_82576:
   3963 	case WM_T_82580:
   3964 	case WM_T_I350:
   3965 	case WM_T_I354:
   3966 	case WM_T_I210:
   3967 	case WM_T_I211:
   3968 		if (sc->sc_type == WM_T_82571) {
   3969 			/* Only 82571 shares port 0 */
   3970 			mask = EEMNGCTL_CFGDONE_0;
   3971 		} else
   3972 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3973 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3974 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3975 				break;
   3976 			delay(1000);
   3977 		}
   3978 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3979 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3980 				device_xname(sc->sc_dev), __func__));
   3981 		}
   3982 		break;
   3983 	case WM_T_ICH8:
   3984 	case WM_T_ICH9:
   3985 	case WM_T_ICH10:
   3986 	case WM_T_PCH:
   3987 	case WM_T_PCH2:
   3988 	case WM_T_PCH_LPT:
   3989 	case WM_T_PCH_SPT:
   3990 	case WM_T_PCH_CNP:
   3991 		delay(10*1000);
   3992 		if (sc->sc_type >= WM_T_ICH10)
   3993 			wm_lan_init_done(sc);
   3994 		else
   3995 			wm_get_auto_rd_done(sc);
   3996 
   3997 		/* Clear PHY Reset Asserted bit */
   3998 		reg = CSR_READ(sc, WMREG_STATUS);
   3999 		if ((reg & STATUS_PHYRA) != 0)
   4000 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4001 		break;
   4002 	default:
   4003 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4004 		    __func__);
   4005 		break;
   4006 	}
   4007 }
   4008 
   4009 int
   4010 wm_phy_post_reset(struct wm_softc *sc)
   4011 {
   4012 	uint16_t reg;
   4013 	int rv = 0;
   4014 
   4015 	/* This function is only for ICH8 and newer. */
   4016 	if (sc->sc_type < WM_T_ICH8)
   4017 		return 0;
   4018 
   4019 	if (wm_phy_resetisblocked(sc)) {
   4020 		/* XXX */
   4021 		device_printf(sc->sc_dev, "PHY is blocked\n");
   4022 		return -1;
   4023 	}
   4024 
   4025 	/* Allow time for h/w to get to quiescent state after reset */
   4026 	delay(10*1000);
   4027 
   4028 	/* Perform any necessary post-reset workarounds */
   4029 	if (sc->sc_type == WM_T_PCH)
   4030 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4031 	else if (sc->sc_type == WM_T_PCH2)
   4032 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4033 	if (rv != 0)
   4034 		return rv;
   4035 
   4036 	/* Clear the host wakeup bit after lcd reset */
   4037 	if (sc->sc_type >= WM_T_PCH) {
   4038 		wm_gmii_hv_readreg(sc->sc_dev, 2, BM_PORT_GEN_CFG, &reg);
   4039 		reg &= ~BM_WUC_HOST_WU_BIT;
   4040 		wm_gmii_hv_writereg(sc->sc_dev, 2, BM_PORT_GEN_CFG, reg);
   4041 	}
   4042 
   4043 	/* Configure the LCD with the extended configuration region in NVM */
   4044 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4045 		return rv;
   4046 
   4047 	/* Configure the LCD with the OEM bits in NVM */
   4048 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4049 
   4050 	if (sc->sc_type == WM_T_PCH2) {
   4051 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4052 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4053 			delay(10 * 1000);
   4054 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4055 		}
   4056 		/* XXX Set EEE LPI Update Timer to 200usec */
   4057 	}
   4058 
   4059 	return rv;
   4060 }
   4061 
   4062 /* Only for PCH and newer */
   4063 static int
   4064 wm_write_smbus_addr(struct wm_softc *sc)
   4065 {
   4066 	uint32_t strap, freq;
   4067 	uint16_t phy_data;
   4068 	int rv;
   4069 
   4070 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4071 		device_xname(sc->sc_dev), __func__));
   4072 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4073 
   4074 	strap = CSR_READ(sc, WMREG_STRAP);
   4075 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4076 
   4077 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4078 	if (rv != 0)
   4079 		return -1;
   4080 
   4081 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4082 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4083 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4084 
   4085 	if (sc->sc_phytype == WMPHY_I217) {
   4086 		/* Restore SMBus frequency */
    4087 		if (freq--) {
   4088 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4089 			    | HV_SMB_ADDR_FREQ_HIGH);
   4090 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4091 			    HV_SMB_ADDR_FREQ_LOW);
   4092 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4093 			    HV_SMB_ADDR_FREQ_HIGH);
   4094 		} else {
   4095 			DPRINTF(WM_DEBUG_INIT,
   4096 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4097 				device_xname(sc->sc_dev), __func__));
   4098 		}
   4099 	}
   4100 
   4101 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4102 	    phy_data);
   4103 }
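/*
 * Note on the bit-field macros used above (illustrative): NetBSD's
 * __SHIFTOUT(val, mask) extracts a field, e.g.
 * __SHIFTOUT(0x0000f000, __BITS(15, 12)) == 0xf, and
 * __SHIFTIN(val, mask) places a value into the field position.
 */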
   4104 
   4105 static int
   4106 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4107 {
   4108 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4109 	uint16_t phy_page = 0;
   4110 	int rv = 0;
   4111 
   4112 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4113 		device_xname(sc->sc_dev), __func__));
   4114 
   4115 	switch (sc->sc_type) {
   4116 	case WM_T_ICH8:
   4117 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4118 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4119 			return 0;
   4120 
   4121 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4122 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4123 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4124 			break;
   4125 		}
   4126 		/* FALLTHROUGH */
   4127 	case WM_T_PCH:
   4128 	case WM_T_PCH2:
   4129 	case WM_T_PCH_LPT:
   4130 	case WM_T_PCH_SPT:
   4131 	case WM_T_PCH_CNP:
   4132 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4133 		break;
   4134 	default:
   4135 		return 0;
   4136 	}
   4137 
   4138 	if ((rv = sc->phy.acquire(sc)) != 0)
   4139 		return rv;
   4140 
   4141 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4142 	if ((reg & sw_cfg_mask) == 0)
   4143 		goto release;
   4144 
   4145 	/*
   4146 	 * Make sure HW does not configure LCD from PHY extended configuration
   4147 	 * before SW configuration
   4148 	 */
   4149 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4150 	if ((sc->sc_type < WM_T_PCH2)
   4151 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4152 		goto release;
   4153 
   4154 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4155 		device_xname(sc->sc_dev), __func__));
   4156 	/* word_addr is in DWORD */
   4157 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4158 
   4159 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4160 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4161 	if (cnf_size == 0)
   4162 		goto release;
   4163 
   4164 	if (((sc->sc_type == WM_T_PCH)
   4165 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4166 	    || (sc->sc_type > WM_T_PCH)) {
   4167 		/*
   4168 		 * HW configures the SMBus address and LEDs when the OEM and
   4169 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4170 		 * are cleared, SW will configure them instead.
   4171 		 */
   4172 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4173 			device_xname(sc->sc_dev), __func__));
   4174 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4175 			goto release;
   4176 
   4177 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4178 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4179 		    (uint16_t)reg);
   4180 		if (rv != 0)
   4181 			goto release;
   4182 	}
   4183 
   4184 	/* Configure LCD from extended configuration region. */
   4185 	for (i = 0; i < cnf_size; i++) {
   4186 		uint16_t reg_data, reg_addr;
   4187 
   4188 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4189 			goto release;
   4190 
    4191 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4192 			goto release;
   4193 
   4194 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4195 			phy_page = reg_data;
   4196 
   4197 		reg_addr &= IGPHY_MAXREGADDR;
   4198 		reg_addr |= phy_page;
   4199 
   4200 		KASSERT(sc->phy.writereg_locked != NULL);
   4201 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4202 		    reg_data);
   4203 	}
   4204 
   4205 release:
   4206 	sc->phy.release(sc);
   4207 	return rv;
   4208 }
   4209 
   4210 /*
   4211  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4212  *  @sc:       pointer to the HW structure
   4213  *  @d0_state: boolean if entering d0 or d3 device state
   4214  *
   4215  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4216  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4217  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4218  */
   4219 int
   4220 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4221 {
   4222 	uint32_t mac_reg;
   4223 	uint16_t oem_reg;
   4224 	int rv;
   4225 
   4226 	if (sc->sc_type < WM_T_PCH)
   4227 		return 0;
   4228 
   4229 	rv = sc->phy.acquire(sc);
   4230 	if (rv != 0)
   4231 		return rv;
   4232 
   4233 	if (sc->sc_type == WM_T_PCH) {
   4234 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4235 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4236 			goto release;
   4237 	}
   4238 
   4239 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4240 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4241 		goto release;
   4242 
   4243 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4244 
   4245 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4246 	if (rv != 0)
   4247 		goto release;
   4248 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4249 
   4250 	if (d0_state) {
   4251 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4252 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4253 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4254 			oem_reg |= HV_OEM_BITS_LPLU;
   4255 	} else {
   4256 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4257 		    != 0)
   4258 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4259 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4260 		    != 0)
   4261 			oem_reg |= HV_OEM_BITS_LPLU;
   4262 	}
   4263 
   4264 	/* Set Restart auto-neg to activate the bits */
   4265 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4266 	    && (wm_phy_resetisblocked(sc) == false))
   4267 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4268 
   4269 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4270 
   4271 release:
   4272 	sc->phy.release(sc);
   4273 
   4274 	return rv;
   4275 }
   4276 
   4277 /* Init hardware bits */
   4278 void
   4279 wm_initialize_hardware_bits(struct wm_softc *sc)
   4280 {
   4281 	uint32_t tarc0, tarc1, reg;
   4282 
   4283 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4284 		device_xname(sc->sc_dev), __func__));
   4285 
   4286 	/* For 82571 variant, 80003 and ICHs */
   4287 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4288 	    || (sc->sc_type >= WM_T_80003)) {
   4289 
   4290 		/* Transmit Descriptor Control 0 */
   4291 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4292 		reg |= TXDCTL_COUNT_DESC;
   4293 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4294 
   4295 		/* Transmit Descriptor Control 1 */
   4296 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4297 		reg |= TXDCTL_COUNT_DESC;
   4298 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4299 
   4300 		/* TARC0 */
   4301 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4302 		switch (sc->sc_type) {
   4303 		case WM_T_82571:
   4304 		case WM_T_82572:
   4305 		case WM_T_82573:
   4306 		case WM_T_82574:
   4307 		case WM_T_82583:
   4308 		case WM_T_80003:
   4309 			/* Clear bits 30..27 */
   4310 			tarc0 &= ~__BITS(30, 27);
   4311 			break;
   4312 		default:
   4313 			break;
   4314 		}
   4315 
   4316 		switch (sc->sc_type) {
   4317 		case WM_T_82571:
   4318 		case WM_T_82572:
   4319 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4320 
   4321 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4322 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4323 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4324 			/* 8257[12] Errata No.7 */
    4325 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4326 
   4327 			/* TARC1 bit 28 */
   4328 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4329 				tarc1 &= ~__BIT(28);
   4330 			else
   4331 				tarc1 |= __BIT(28);
   4332 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4333 
   4334 			/*
   4335 			 * 8257[12] Errata No.13
    4336 			 * Disable Dynamic Clock Gating.
   4337 			 */
   4338 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4339 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4340 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4341 			break;
   4342 		case WM_T_82573:
   4343 		case WM_T_82574:
   4344 		case WM_T_82583:
   4345 			if ((sc->sc_type == WM_T_82574)
   4346 			    || (sc->sc_type == WM_T_82583))
   4347 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4348 
   4349 			/* Extended Device Control */
   4350 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4351 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4352 			reg |= __BIT(22);	/* Set bit 22 */
   4353 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4354 
   4355 			/* Device Control */
   4356 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4357 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4358 
   4359 			/* PCIe Control Register */
   4360 			/*
   4361 			 * 82573 Errata (unknown).
   4362 			 *
   4363 			 * 82574 Errata 25 and 82583 Errata 12
   4364 			 * "Dropped Rx Packets":
    4365 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4366 			 */
   4367 			reg = CSR_READ(sc, WMREG_GCR);
   4368 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4369 			CSR_WRITE(sc, WMREG_GCR, reg);
   4370 
   4371 			if ((sc->sc_type == WM_T_82574)
   4372 			    || (sc->sc_type == WM_T_82583)) {
   4373 				/*
   4374 				 * Document says this bit must be set for
   4375 				 * proper operation.
   4376 				 */
   4377 				reg = CSR_READ(sc, WMREG_GCR);
   4378 				reg |= __BIT(22);
   4379 				CSR_WRITE(sc, WMREG_GCR, reg);
   4380 
   4381 				/*
    4382 				 * Apply a workaround for a hardware erratum
    4383 				 * documented in the errata docs. It fixes an
    4384 				 * issue where error-prone or unreliable PCIe
    4385 				 * completions occur, particularly with ASPM
    4386 				 * enabled. Without the fix, the issue can
    4387 				 * cause Tx timeouts.
   4388 				 */
   4389 				reg = CSR_READ(sc, WMREG_GCR2);
   4390 				reg |= __BIT(0);
   4391 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4392 			}
   4393 			break;
   4394 		case WM_T_80003:
   4395 			/* TARC0 */
   4396 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4397 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4398 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4399 
   4400 			/* TARC1 bit 28 */
   4401 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4402 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4403 				tarc1 &= ~__BIT(28);
   4404 			else
   4405 				tarc1 |= __BIT(28);
   4406 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4407 			break;
   4408 		case WM_T_ICH8:
   4409 		case WM_T_ICH9:
   4410 		case WM_T_ICH10:
   4411 		case WM_T_PCH:
   4412 		case WM_T_PCH2:
   4413 		case WM_T_PCH_LPT:
   4414 		case WM_T_PCH_SPT:
   4415 		case WM_T_PCH_CNP:
   4416 			/* TARC0 */
   4417 			if (sc->sc_type == WM_T_ICH8) {
   4418 				/* Set TARC0 bits 29 and 28 */
   4419 				tarc0 |= __BITS(29, 28);
   4420 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4421 				tarc0 |= __BIT(29);
   4422 				/*
    4423 				 * Drop bit 28. From Linux.
   4424 				 * See I218/I219 spec update
   4425 				 * "5. Buffer Overrun While the I219 is
   4426 				 * Processing DMA Transactions"
   4427 				 */
   4428 				tarc0 &= ~__BIT(28);
   4429 			}
   4430 			/* Set TARC0 bits 23,24,26,27 */
   4431 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4432 
   4433 			/* CTRL_EXT */
   4434 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4435 			reg |= __BIT(22);	/* Set bit 22 */
   4436 			/*
   4437 			 * Enable PHY low-power state when MAC is at D3
   4438 			 * w/o WoL
   4439 			 */
   4440 			if (sc->sc_type >= WM_T_PCH)
   4441 				reg |= CTRL_EXT_PHYPDEN;
   4442 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4443 
   4444 			/* TARC1 */
   4445 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4446 			/* bit 28 */
   4447 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4448 				tarc1 &= ~__BIT(28);
   4449 			else
   4450 				tarc1 |= __BIT(28);
   4451 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4452 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4453 
   4454 			/* Device Status */
   4455 			if (sc->sc_type == WM_T_ICH8) {
   4456 				reg = CSR_READ(sc, WMREG_STATUS);
   4457 				reg &= ~__BIT(31);
   4458 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4459 
   4460 			}
   4461 
   4462 			/* IOSFPC */
   4463 			if (sc->sc_type == WM_T_PCH_SPT) {
   4464 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4465 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4466 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4467 			}
   4468 			/*
    4469 			 * To work around a descriptor data corruption issue
    4470 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4471 			 * filtering capability.
   4472 			 */
   4473 			reg = CSR_READ(sc, WMREG_RFCTL);
   4474 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4475 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4476 			break;
   4477 		default:
   4478 			break;
   4479 		}
   4480 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4481 
   4482 		switch (sc->sc_type) {
   4483 		/*
   4484 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4485 		 * Avoid RSS Hash Value bug.
   4486 		 */
   4487 		case WM_T_82571:
   4488 		case WM_T_82572:
   4489 		case WM_T_82573:
   4490 		case WM_T_80003:
   4491 		case WM_T_ICH8:
   4492 			reg = CSR_READ(sc, WMREG_RFCTL);
   4493 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4494 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4495 			break;
   4496 		case WM_T_82574:
    4497 			/* Use extended Rx descriptors. */
   4498 			reg = CSR_READ(sc, WMREG_RFCTL);
   4499 			reg |= WMREG_RFCTL_EXSTEN;
   4500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4501 			break;
   4502 		default:
   4503 			break;
   4504 		}
   4505 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4506 		/*
   4507 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4508 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4509 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4510 		 * Correctly by the Device"
   4511 		 *
   4512 		 * I354(C2000) Errata AVR53:
   4513 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4514 		 * Hang"
   4515 		 */
   4516 		reg = CSR_READ(sc, WMREG_RFCTL);
   4517 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4518 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4519 	}
   4520 }
   4521 
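/*
 * wm_rxpbs_adjust_82580:
 *
 *	Map the raw RXPBS register value to a packet buffer size using
 *	the 82580 lookup table; out-of-range values fall back to 0.
 */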
   4522 static uint32_t
   4523 wm_rxpbs_adjust_82580(uint32_t val)
   4524 {
   4525 	uint32_t rv = 0;
   4526 
   4527 	if (val < __arraycount(wm_82580_rxpbs_table))
   4528 		rv = wm_82580_rxpbs_table[val];
   4529 
   4530 	return rv;
   4531 }
   4532 
   4533 /*
   4534  * wm_reset_phy:
   4535  *
    4536  *	Generic PHY reset function.
    4537  *	Same as e1000_phy_hw_reset_generic().
   4538  */
   4539 static int
   4540 wm_reset_phy(struct wm_softc *sc)
   4541 {
   4542 	uint32_t reg;
   4543 
   4544 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4545 		device_xname(sc->sc_dev), __func__));
   4546 	if (wm_phy_resetisblocked(sc))
   4547 		return -1;
   4548 
   4549 	sc->phy.acquire(sc);
   4550 
   4551 	reg = CSR_READ(sc, WMREG_CTRL);
   4552 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4553 	CSR_WRITE_FLUSH(sc);
   4554 
   4555 	delay(sc->phy.reset_delay_us);
   4556 
   4557 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4558 	CSR_WRITE_FLUSH(sc);
   4559 
   4560 	delay(150);
   4561 
   4562 	sc->phy.release(sc);
   4563 
   4564 	wm_get_cfg_done(sc);
   4565 	wm_phy_post_reset(sc);
   4566 
   4567 	return 0;
   4568 }
   4569 
   4570 /*
    4571  * Used only by WM_T_PCH_SPT, which does not use multiqueue, so it is
    4572  * enough to check sc->sc_queue[0] only.
   4573  */
   4574 static void
   4575 wm_flush_desc_rings(struct wm_softc *sc)
   4576 {
   4577 	pcireg_t preg;
   4578 	uint32_t reg;
   4579 	struct wm_txqueue *txq;
   4580 	wiseman_txdesc_t *txd;
   4581 	int nexttx;
   4582 	uint32_t rctl;
   4583 
   4584 	/* First, disable MULR fix in FEXTNVM11 */
   4585 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4586 	reg |= FEXTNVM11_DIS_MULRFIX;
   4587 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4588 
   4589 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4590 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4591 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4592 		return;
   4593 
   4594 	/* TX */
   4595 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4596 	    device_xname(sc->sc_dev), preg, reg);
   4597 	reg = CSR_READ(sc, WMREG_TCTL);
   4598 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4599 
   4600 	txq = &sc->sc_queue[0].wmq_txq;
   4601 	nexttx = txq->txq_next;
   4602 	txd = &txq->txq_descs[nexttx];
   4603 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4604 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4605 	txd->wtx_fields.wtxu_status = 0;
   4606 	txd->wtx_fields.wtxu_options = 0;
   4607 	txd->wtx_fields.wtxu_vlan = 0;
   4608 
   4609 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4610 	    BUS_SPACE_BARRIER_WRITE);
   4611 
   4612 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4613 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4614 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4615 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4616 	delay(250);
   4617 
   4618 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4619 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4620 		return;
   4621 
   4622 	/* RX */
   4623 	printf("%s: Need RX flush (reg = %08x)\n",
   4624 	    device_xname(sc->sc_dev), preg);
   4625 	rctl = CSR_READ(sc, WMREG_RCTL);
   4626 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4627 	CSR_WRITE_FLUSH(sc);
   4628 	delay(150);
   4629 
   4630 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4631 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4632 	reg &= 0xffffc000;
   4633 	/*
    4634 	 * Update thresholds: set the prefetch threshold to 31 and the
    4635 	 * host threshold to 1, and make sure the granularity is
    4636 	 * "descriptors", not "cache lines".
   4637 	 */
   4638 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4639 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4640 
   4641 	/*
   4642 	 * momentarily enable the RX ring for the changes to take
   4643 	 * effect
   4644 	 */
   4645 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4646 	CSR_WRITE_FLUSH(sc);
   4647 	delay(150);
   4648 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4649 }
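
/*
 * Note: the flush sequence above mirrors the published I219 (PCH_SPT)
 * workaround as implemented in other drivers such as Linux's e1000e:
 * queue one dummy 512-byte descriptor with IFCS set and bump TDT so any
 * stuck DMA transaction completes, then, if the flush-required bit is
 * still set, briefly toggle RCTL_EN with updated RXDCTL thresholds to
 * drain the RX ring as well.
 */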
   4650 
   4651 /*
   4652  * wm_reset:
   4653  *
   4654  *	Reset the i82542 chip.
   4655  */
   4656 static void
   4657 wm_reset(struct wm_softc *sc)
   4658 {
   4659 	int phy_reset = 0;
   4660 	int i, error = 0;
   4661 	uint32_t reg;
   4662 	uint16_t kmreg;
   4663 	int rv;
   4664 
   4665 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4666 		device_xname(sc->sc_dev), __func__));
   4667 	KASSERT(sc->sc_type != 0);
   4668 
   4669 	/*
   4670 	 * Allocate on-chip memory according to the MTU size.
   4671 	 * The Packet Buffer Allocation register must be written
   4672 	 * before the chip is reset.
   4673 	 */
   4674 	switch (sc->sc_type) {
   4675 	case WM_T_82547:
   4676 	case WM_T_82547_2:
   4677 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4678 		    PBA_22K : PBA_30K;
   4679 		for (i = 0; i < sc->sc_nqueues; i++) {
   4680 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4681 			txq->txq_fifo_head = 0;
   4682 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4683 			txq->txq_fifo_size =
   4684 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4685 			txq->txq_fifo_stall = 0;
   4686 		}
   4687 		break;
   4688 	case WM_T_82571:
   4689 	case WM_T_82572:
    4690 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4691 	case WM_T_80003:
   4692 		sc->sc_pba = PBA_32K;
   4693 		break;
   4694 	case WM_T_82573:
   4695 		sc->sc_pba = PBA_12K;
   4696 		break;
   4697 	case WM_T_82574:
   4698 	case WM_T_82583:
   4699 		sc->sc_pba = PBA_20K;
   4700 		break;
   4701 	case WM_T_82576:
   4702 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4703 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4704 		break;
   4705 	case WM_T_82580:
   4706 	case WM_T_I350:
   4707 	case WM_T_I354:
   4708 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4709 		break;
   4710 	case WM_T_I210:
   4711 	case WM_T_I211:
   4712 		sc->sc_pba = PBA_34K;
   4713 		break;
   4714 	case WM_T_ICH8:
   4715 		/* Workaround for a bit corruption issue in FIFO memory */
   4716 		sc->sc_pba = PBA_8K;
   4717 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4718 		break;
   4719 	case WM_T_ICH9:
   4720 	case WM_T_ICH10:
   4721 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4722 		    PBA_14K : PBA_10K;
   4723 		break;
   4724 	case WM_T_PCH:
   4725 	case WM_T_PCH2:	/* XXX 14K? */
   4726 	case WM_T_PCH_LPT:
   4727 	case WM_T_PCH_SPT:
   4728 	case WM_T_PCH_CNP:
   4729 		sc->sc_pba = PBA_26K;
   4730 		break;
   4731 	default:
   4732 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4733 		    PBA_40K : PBA_48K;
   4734 		break;
   4735 	}
   4736 	/*
    4737 	 * Only old or non-multiqueue devices have the PBA register.
   4738 	 * XXX Need special handling for 82575.
   4739 	 */
   4740 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4741 	    || (sc->sc_type == WM_T_82575))
   4742 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4743 
   4744 	/* Prevent the PCI-E bus from sticking */
   4745 	if (sc->sc_flags & WM_F_PCIE) {
   4746 		int timeout = 800;
   4747 
   4748 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4749 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4750 
   4751 		while (timeout--) {
   4752 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4753 			    == 0)
   4754 				break;
   4755 			delay(100);
   4756 		}
   4757 		if (timeout == 0)
   4758 			device_printf(sc->sc_dev,
   4759 			    "failed to disable busmastering\n");
   4760 	}
   4761 
   4762 	/* Set the completion timeout for interface */
   4763 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4764 	    || (sc->sc_type == WM_T_82580)
   4765 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4766 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4767 		wm_set_pcie_completion_timeout(sc);
   4768 
   4769 	/* Clear interrupt */
   4770 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4771 	if (wm_is_using_msix(sc)) {
   4772 		if (sc->sc_type != WM_T_82574) {
   4773 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4774 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4775 		} else
   4776 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4777 	}
   4778 
   4779 	/* Stop the transmit and receive processes. */
   4780 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4781 	sc->sc_rctl &= ~RCTL_EN;
   4782 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4783 	CSR_WRITE_FLUSH(sc);
   4784 
   4785 	/* XXX set_tbi_sbp_82543() */
   4786 
   4787 	delay(10*1000);
   4788 
   4789 	/* Must acquire the MDIO ownership before MAC reset */
   4790 	switch (sc->sc_type) {
   4791 	case WM_T_82573:
   4792 	case WM_T_82574:
   4793 	case WM_T_82583:
   4794 		error = wm_get_hw_semaphore_82573(sc);
   4795 		break;
   4796 	default:
   4797 		break;
   4798 	}
   4799 
   4800 	/*
   4801 	 * 82541 Errata 29? & 82547 Errata 28?
    4802 	 * See also the description of the PHY_RST bit in the CTRL register
    4803 	 * in 8254x_GBe_SDM.pdf.
   4804 	 */
   4805 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4806 		CSR_WRITE(sc, WMREG_CTRL,
   4807 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4808 		CSR_WRITE_FLUSH(sc);
   4809 		delay(5000);
   4810 	}
   4811 
   4812 	switch (sc->sc_type) {
   4813 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4814 	case WM_T_82541:
   4815 	case WM_T_82541_2:
   4816 	case WM_T_82547:
   4817 	case WM_T_82547_2:
   4818 		/*
   4819 		 * On some chipsets, a reset through a memory-mapped write
   4820 		 * cycle can cause the chip to reset before completing the
    4821 		 * write cycle. This causes major headaches that can be avoided
   4822 		 * by issuing the reset via indirect register writes through
   4823 		 * I/O space.
   4824 		 *
   4825 		 * So, if we successfully mapped the I/O BAR at attach time,
   4826 		 * use that. Otherwise, try our luck with a memory-mapped
   4827 		 * reset.
   4828 		 */
   4829 		if (sc->sc_flags & WM_F_IOH_VALID)
   4830 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4831 		else
   4832 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4833 		break;
   4834 	case WM_T_82545_3:
   4835 	case WM_T_82546_3:
   4836 		/* Use the shadow control register on these chips. */
   4837 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4838 		break;
   4839 	case WM_T_80003:
   4840 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4841 		sc->phy.acquire(sc);
   4842 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4843 		sc->phy.release(sc);
   4844 		break;
   4845 	case WM_T_ICH8:
   4846 	case WM_T_ICH9:
   4847 	case WM_T_ICH10:
   4848 	case WM_T_PCH:
   4849 	case WM_T_PCH2:
   4850 	case WM_T_PCH_LPT:
   4851 	case WM_T_PCH_SPT:
   4852 	case WM_T_PCH_CNP:
   4853 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4854 		if (wm_phy_resetisblocked(sc) == false) {
   4855 			/*
   4856 			 * Gate automatic PHY configuration by hardware on
   4857 			 * non-managed 82579
   4858 			 */
   4859 			if ((sc->sc_type == WM_T_PCH2)
   4860 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4861 				== 0))
   4862 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4863 
   4864 			reg |= CTRL_PHY_RESET;
   4865 			phy_reset = 1;
   4866 		} else
   4867 			printf("XXX reset is blocked!!!\n");
   4868 		sc->phy.acquire(sc);
   4869 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4870 		/* Don't insert a completion barrier during reset */
   4871 		delay(20*1000);
   4872 		mutex_exit(sc->sc_ich_phymtx);
   4873 		break;
   4874 	case WM_T_82580:
   4875 	case WM_T_I350:
   4876 	case WM_T_I354:
   4877 	case WM_T_I210:
   4878 	case WM_T_I211:
   4879 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4880 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4881 			CSR_WRITE_FLUSH(sc);
   4882 		delay(5000);
   4883 		break;
   4884 	case WM_T_82542_2_0:
   4885 	case WM_T_82542_2_1:
   4886 	case WM_T_82543:
   4887 	case WM_T_82540:
   4888 	case WM_T_82545:
   4889 	case WM_T_82546:
   4890 	case WM_T_82571:
   4891 	case WM_T_82572:
   4892 	case WM_T_82573:
   4893 	case WM_T_82574:
   4894 	case WM_T_82575:
   4895 	case WM_T_82576:
   4896 	case WM_T_82583:
   4897 	default:
   4898 		/* Everything else can safely use the documented method. */
   4899 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4900 		break;
   4901 	}
   4902 
   4903 	/* Must release the MDIO ownership after MAC reset */
   4904 	switch (sc->sc_type) {
   4905 	case WM_T_82573:
   4906 	case WM_T_82574:
   4907 	case WM_T_82583:
   4908 		if (error == 0)
   4909 			wm_put_hw_semaphore_82573(sc);
   4910 		break;
   4911 	default:
   4912 		break;
   4913 	}
   4914 
   4915 	/* Set Phy Config Counter to 50msec */
   4916 	if (sc->sc_type == WM_T_PCH2) {
   4917 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4918 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4919 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4920 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4921 	}
   4922 
   4923 	if (phy_reset != 0)
   4924 		wm_get_cfg_done(sc);
   4925 
   4926 	/* reload EEPROM */
   4927 	switch (sc->sc_type) {
   4928 	case WM_T_82542_2_0:
   4929 	case WM_T_82542_2_1:
   4930 	case WM_T_82543:
   4931 	case WM_T_82544:
   4932 		delay(10);
   4933 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4934 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4935 		CSR_WRITE_FLUSH(sc);
   4936 		delay(2000);
   4937 		break;
   4938 	case WM_T_82540:
   4939 	case WM_T_82545:
   4940 	case WM_T_82545_3:
   4941 	case WM_T_82546:
   4942 	case WM_T_82546_3:
   4943 		delay(5*1000);
   4944 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4945 		break;
   4946 	case WM_T_82541:
   4947 	case WM_T_82541_2:
   4948 	case WM_T_82547:
   4949 	case WM_T_82547_2:
   4950 		delay(20000);
   4951 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4952 		break;
   4953 	case WM_T_82571:
   4954 	case WM_T_82572:
   4955 	case WM_T_82573:
   4956 	case WM_T_82574:
   4957 	case WM_T_82583:
   4958 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4959 			delay(10);
   4960 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4961 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4962 			CSR_WRITE_FLUSH(sc);
   4963 		}
   4964 		/* check EECD_EE_AUTORD */
   4965 		wm_get_auto_rd_done(sc);
   4966 		/*
    4967 		 * PHY configuration from the NVM starts just after
    4968 		 * EECD_AUTO_RD is set.
   4969 		 */
   4970 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4971 		    || (sc->sc_type == WM_T_82583))
   4972 			delay(25*1000);
   4973 		break;
   4974 	case WM_T_82575:
   4975 	case WM_T_82576:
   4976 	case WM_T_82580:
   4977 	case WM_T_I350:
   4978 	case WM_T_I354:
   4979 	case WM_T_I210:
   4980 	case WM_T_I211:
   4981 	case WM_T_80003:
   4982 		/* check EECD_EE_AUTORD */
   4983 		wm_get_auto_rd_done(sc);
   4984 		break;
   4985 	case WM_T_ICH8:
   4986 	case WM_T_ICH9:
   4987 	case WM_T_ICH10:
   4988 	case WM_T_PCH:
   4989 	case WM_T_PCH2:
   4990 	case WM_T_PCH_LPT:
   4991 	case WM_T_PCH_SPT:
   4992 	case WM_T_PCH_CNP:
   4993 		break;
   4994 	default:
   4995 		panic("%s: unknown type\n", __func__);
   4996 	}
   4997 
   4998 	/* Check whether EEPROM is present or not */
   4999 	switch (sc->sc_type) {
   5000 	case WM_T_82575:
   5001 	case WM_T_82576:
   5002 	case WM_T_82580:
   5003 	case WM_T_I350:
   5004 	case WM_T_I354:
   5005 	case WM_T_ICH8:
   5006 	case WM_T_ICH9:
   5007 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5008 			/* Not found */
   5009 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5010 			if (sc->sc_type == WM_T_82575)
   5011 				wm_reset_init_script_82575(sc);
   5012 		}
   5013 		break;
   5014 	default:
   5015 		break;
   5016 	}
   5017 
   5018 	if (phy_reset != 0)
   5019 		wm_phy_post_reset(sc);
   5020 
   5021 	if ((sc->sc_type == WM_T_82580)
   5022 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5023 		/* clear global device reset status bit */
   5024 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5025 	}
   5026 
   5027 	/* Clear any pending interrupt events. */
   5028 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5029 	reg = CSR_READ(sc, WMREG_ICR);
   5030 	if (wm_is_using_msix(sc)) {
   5031 		if (sc->sc_type != WM_T_82574) {
   5032 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5033 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5034 		} else
   5035 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5036 	}
   5037 
   5038 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5039 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5040 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5041 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5042 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5043 		reg |= KABGTXD_BGSQLBIAS;
   5044 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5045 	}
   5046 
   5047 	/* reload sc_ctrl */
   5048 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5049 
   5050 	wm_set_eee(sc);
   5051 
   5052 	/*
   5053 	 * For PCH, this write will make sure that any noise will be detected
   5054 	 * as a CRC error and be dropped rather than show up as a bad packet
    5055 	 * to the DMA engine.
   5056 	 */
   5057 	if (sc->sc_type == WM_T_PCH)
   5058 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5059 
   5060 	if (sc->sc_type >= WM_T_82544)
   5061 		CSR_WRITE(sc, WMREG_WUC, 0);
   5062 
   5063 	if (sc->sc_type < WM_T_82575)
   5064 		wm_disable_aspm(sc); /* Workaround for some chips */
   5065 
   5066 	wm_reset_mdicnfg_82580(sc);
   5067 
   5068 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5069 		wm_pll_workaround_i210(sc);
   5070 
   5071 	if (sc->sc_type == WM_T_80003) {
   5072 		/* default to TRUE to enable the MDIC W/A */
   5073 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5074 
   5075 		rv = wm_kmrn_readreg(sc,
   5076 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5077 		if (rv == 0) {
   5078 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5079 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5080 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5081 			else
   5082 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5083 		}
   5084 	}
   5085 }
   5086 
   5087 /*
   5088  * wm_add_rxbuf:
   5089  *
    5090  *	Add a receive buffer to the indicated descriptor.
   5091  */
   5092 static int
   5093 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5094 {
   5095 	struct wm_softc *sc = rxq->rxq_sc;
   5096 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5097 	struct mbuf *m;
   5098 	int error;
   5099 
   5100 	KASSERT(mutex_owned(rxq->rxq_lock));
   5101 
   5102 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5103 	if (m == NULL)
   5104 		return ENOBUFS;
   5105 
   5106 	MCLGET(m, M_DONTWAIT);
   5107 	if ((m->m_flags & M_EXT) == 0) {
   5108 		m_freem(m);
   5109 		return ENOBUFS;
   5110 	}
   5111 
   5112 	if (rxs->rxs_mbuf != NULL)
   5113 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5114 
   5115 	rxs->rxs_mbuf = m;
   5116 
   5117 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5118 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5119 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5120 	if (error) {
   5121 		/* XXX XXX XXX */
   5122 		aprint_error_dev(sc->sc_dev,
   5123 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5124 		panic("wm_add_rxbuf");
   5125 	}
   5126 
   5127 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5128 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5129 
   5130 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5131 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5132 			wm_init_rxdesc(rxq, idx);
   5133 	} else
   5134 		wm_init_rxdesc(rxq, idx);
   5135 
   5136 	return 0;
   5137 }
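
/*
 * Note: on NEWQUEUE (82575 and later) chips, wm_add_rxbuf() defers
 * writing the RX descriptor while RCTL_EN is clear; for those chips the
 * descriptors must be initialized only after RCTL.EN is set (see the
 * comment above wm_set_filter() in wm_init_locked()).
 */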
   5138 
   5139 /*
   5140  * wm_rxdrain:
   5141  *
   5142  *	Drain the receive queue.
   5143  */
   5144 static void
   5145 wm_rxdrain(struct wm_rxqueue *rxq)
   5146 {
   5147 	struct wm_softc *sc = rxq->rxq_sc;
   5148 	struct wm_rxsoft *rxs;
   5149 	int i;
   5150 
   5151 	KASSERT(mutex_owned(rxq->rxq_lock));
   5152 
   5153 	for (i = 0; i < WM_NRXDESC; i++) {
   5154 		rxs = &rxq->rxq_soft[i];
   5155 		if (rxs->rxs_mbuf != NULL) {
   5156 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5157 			m_freem(rxs->rxs_mbuf);
   5158 			rxs->rxs_mbuf = NULL;
   5159 		}
   5160 	}
   5161 }
   5162 
   5163 /*
    5164  * Set up registers for RSS.
    5165  *
    5166  * XXX VMDq is not supported yet.
   5167  */
   5168 static void
   5169 wm_init_rss(struct wm_softc *sc)
   5170 {
   5171 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5172 	int i;
   5173 
   5174 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5175 
   5176 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5177 		int qid, reta_ent;
   5178 
   5179 		qid  = i % sc->sc_nqueues;
   5180 		switch (sc->sc_type) {
   5181 		case WM_T_82574:
   5182 			reta_ent = __SHIFTIN(qid,
   5183 			    RETA_ENT_QINDEX_MASK_82574);
   5184 			break;
   5185 		case WM_T_82575:
   5186 			reta_ent = __SHIFTIN(qid,
   5187 			    RETA_ENT_QINDEX1_MASK_82575);
   5188 			break;
   5189 		default:
   5190 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5191 			break;
   5192 		}
   5193 
   5194 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5195 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5196 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5197 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5198 	}
   5199 
   5200 	rss_getkey((uint8_t *)rss_key);
   5201 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5202 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5203 
   5204 	if (sc->sc_type == WM_T_82574)
   5205 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5206 	else
   5207 		mrqc = MRQC_ENABLE_RSS_MQ;
   5208 
   5209 	/*
    5210 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
    5211 	 * See the IPV6EXDIS bit in wm_initialize_hardware_bits().
   5212 	 */
   5213 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5214 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5215 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5216 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5217 
   5218 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5219 }
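
/*
 * Example of the redirection table fill above: with sc_nqueues == 4 the
 * RETA entries repeat 0, 1, 2, 3, 0, 1, ... so the RSS hash of an
 * incoming flow indexes the table and spreads flows roughly uniformly
 * across the four RX queues.
 */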
   5220 
   5221 /*
    5222  * Adjust the TX and RX queue numbers which the system actually uses.
    5223  *
    5224  * The numbers are affected by the parameters below:
    5225  *     - The number of hardware queues
   5226  *     - The number of MSI-X vectors (= "nvectors" argument)
   5227  *     - ncpu
   5228  */
   5229 static void
   5230 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5231 {
   5232 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5233 
   5234 	if (nvectors < 2) {
   5235 		sc->sc_nqueues = 1;
   5236 		return;
   5237 	}
   5238 
   5239 	switch (sc->sc_type) {
   5240 	case WM_T_82572:
   5241 		hw_ntxqueues = 2;
   5242 		hw_nrxqueues = 2;
   5243 		break;
   5244 	case WM_T_82574:
   5245 		hw_ntxqueues = 2;
   5246 		hw_nrxqueues = 2;
   5247 		break;
   5248 	case WM_T_82575:
   5249 		hw_ntxqueues = 4;
   5250 		hw_nrxqueues = 4;
   5251 		break;
   5252 	case WM_T_82576:
   5253 		hw_ntxqueues = 16;
   5254 		hw_nrxqueues = 16;
   5255 		break;
   5256 	case WM_T_82580:
   5257 	case WM_T_I350:
   5258 	case WM_T_I354:
   5259 		hw_ntxqueues = 8;
   5260 		hw_nrxqueues = 8;
   5261 		break;
   5262 	case WM_T_I210:
   5263 		hw_ntxqueues = 4;
   5264 		hw_nrxqueues = 4;
   5265 		break;
   5266 	case WM_T_I211:
   5267 		hw_ntxqueues = 2;
   5268 		hw_nrxqueues = 2;
   5269 		break;
   5270 		/*
    5271 		 * As the ethernet controllers below do not support MSI-X,
    5272 		 * this driver does not use multiqueue on them:
   5273 		 *     - WM_T_80003
   5274 		 *     - WM_T_ICH8
   5275 		 *     - WM_T_ICH9
   5276 		 *     - WM_T_ICH10
   5277 		 *     - WM_T_PCH
   5278 		 *     - WM_T_PCH2
   5279 		 *     - WM_T_PCH_LPT
   5280 		 */
   5281 	default:
   5282 		hw_ntxqueues = 1;
   5283 		hw_nrxqueues = 1;
   5284 		break;
   5285 	}
   5286 
   5287 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5288 
   5289 	/*
    5290 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5291 	 * limit the number of queues actually used.
   5292 	 */
   5293 	if (nvectors < hw_nqueues + 1)
   5294 		sc->sc_nqueues = nvectors - 1;
   5295 	else
   5296 		sc->sc_nqueues = hw_nqueues;
   5297 
   5298 	/*
    5299 	 * Since more queues than CPUs cannot improve scaling, we limit
    5300 	 * the number of queues actually used.
   5301 	 */
   5302 	if (ncpu < sc->sc_nqueues)
   5303 		sc->sc_nqueues = ncpu;
   5304 }
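
/*
 * Worked example of the limits above: an 82576 (16 hardware TX/RX queue
 * pairs) on a 4-CPU machine granted 5 MSI-X vectors ends up with
 * sc_nqueues = min(16, 5 - 1, ncpu) = 4; one vector is reserved for the
 * link interrupt and the other four serve one TX/RX queue pair each.
 */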
   5305 
   5306 static inline bool
   5307 wm_is_using_msix(struct wm_softc *sc)
   5308 {
   5309 
   5310 	return (sc->sc_nintrs > 1);
   5311 }
   5312 
   5313 static inline bool
   5314 wm_is_using_multiqueue(struct wm_softc *sc)
   5315 {
   5316 
   5317 	return (sc->sc_nqueues > 1);
   5318 }
   5319 
   5320 static int
   5321 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5322 {
   5323 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5324 	wmq->wmq_id = qidx;
   5325 	wmq->wmq_intr_idx = intr_idx;
   5326 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5327 #ifdef WM_MPSAFE
   5328 	    | SOFTINT_MPSAFE
   5329 #endif
   5330 	    , wm_handle_queue, wmq);
   5331 	if (wmq->wmq_si != NULL)
   5332 		return 0;
   5333 
   5334 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5335 	    wmq->wmq_id);
   5336 
   5337 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5338 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5339 	return ENOMEM;
   5340 }
   5341 
   5342 /*
    5343  * Both single-interrupt MSI and INTx can use this function.
   5344  */
   5345 static int
   5346 wm_setup_legacy(struct wm_softc *sc)
   5347 {
   5348 	pci_chipset_tag_t pc = sc->sc_pc;
   5349 	const char *intrstr = NULL;
   5350 	char intrbuf[PCI_INTRSTR_LEN];
   5351 	int error;
   5352 
   5353 	error = wm_alloc_txrx_queues(sc);
   5354 	if (error) {
   5355 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5356 		    error);
   5357 		return ENOMEM;
   5358 	}
   5359 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5360 	    sizeof(intrbuf));
   5361 #ifdef WM_MPSAFE
   5362 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5363 #endif
   5364 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5365 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5366 	if (sc->sc_ihs[0] == NULL) {
   5367 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5368 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5369 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5370 		return ENOMEM;
   5371 	}
   5372 
   5373 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5374 	sc->sc_nintrs = 1;
   5375 
   5376 	return wm_softint_establish(sc, 0, 0);
   5377 }
   5378 
   5379 static int
   5380 wm_setup_msix(struct wm_softc *sc)
   5381 {
   5382 	void *vih;
   5383 	kcpuset_t *affinity;
   5384 	int qidx, error, intr_idx, txrx_established;
   5385 	pci_chipset_tag_t pc = sc->sc_pc;
   5386 	const char *intrstr = NULL;
   5387 	char intrbuf[PCI_INTRSTR_LEN];
   5388 	char intr_xname[INTRDEVNAMEBUF];
   5389 
   5390 	if (sc->sc_nqueues < ncpu) {
   5391 		/*
    5392 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5393 		 * interrupts starts at CPU#1.
   5394 		 */
   5395 		sc->sc_affinity_offset = 1;
   5396 	} else {
   5397 		/*
    5398 		 * In this case, this device uses all CPUs. For readability,
    5399 		 * we match the affinity cpu_index to the MSI-X vector number.
   5400 		 */
   5401 		sc->sc_affinity_offset = 0;
   5402 	}
   5403 
   5404 	error = wm_alloc_txrx_queues(sc);
   5405 	if (error) {
   5406 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5407 		    error);
   5408 		return ENOMEM;
   5409 	}
   5410 
   5411 	kcpuset_create(&affinity, false);
   5412 	intr_idx = 0;
   5413 
   5414 	/*
   5415 	 * TX and RX
   5416 	 */
   5417 	txrx_established = 0;
   5418 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5419 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5420 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5421 
   5422 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5423 		    sizeof(intrbuf));
   5424 #ifdef WM_MPSAFE
   5425 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5426 		    PCI_INTR_MPSAFE, true);
   5427 #endif
   5428 		memset(intr_xname, 0, sizeof(intr_xname));
   5429 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5430 		    device_xname(sc->sc_dev), qidx);
   5431 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5432 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5433 		if (vih == NULL) {
   5434 			aprint_error_dev(sc->sc_dev,
   5435 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5436 			    intrstr ? " at " : "",
   5437 			    intrstr ? intrstr : "");
   5438 
   5439 			goto fail;
   5440 		}
   5441 		kcpuset_zero(affinity);
   5442 		/* Round-robin affinity */
   5443 		kcpuset_set(affinity, affinity_to);
   5444 		error = interrupt_distribute(vih, affinity, NULL);
   5445 		if (error == 0) {
   5446 			aprint_normal_dev(sc->sc_dev,
   5447 			    "for TX and RX interrupting at %s affinity to %u\n",
   5448 			    intrstr, affinity_to);
   5449 		} else {
   5450 			aprint_normal_dev(sc->sc_dev,
   5451 			    "for TX and RX interrupting at %s\n", intrstr);
   5452 		}
   5453 		sc->sc_ihs[intr_idx] = vih;
   5454 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5455 			goto fail;
   5456 		txrx_established++;
   5457 		intr_idx++;
   5458 	}
   5459 
   5460 	/*
   5461 	 * LINK
   5462 	 */
   5463 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5464 	    sizeof(intrbuf));
   5465 #ifdef WM_MPSAFE
   5466 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5467 #endif
   5468 	memset(intr_xname, 0, sizeof(intr_xname));
   5469 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5470 	    device_xname(sc->sc_dev));
   5471 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5472 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5473 	if (vih == NULL) {
   5474 		aprint_error_dev(sc->sc_dev,
   5475 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5476 		    intrstr ? " at " : "",
   5477 		    intrstr ? intrstr : "");
   5478 
   5479 		goto fail;
   5480 	}
    5481 	/* Keep the default affinity for the LINK interrupt */
   5482 	aprint_normal_dev(sc->sc_dev,
   5483 	    "for LINK interrupting at %s\n", intrstr);
   5484 	sc->sc_ihs[intr_idx] = vih;
   5485 	sc->sc_link_intr_idx = intr_idx;
   5486 
   5487 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5488 	kcpuset_destroy(affinity);
   5489 	return 0;
   5490 
   5491  fail:
   5492 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5493 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5494 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5495 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5496 	}
   5497 
   5498 	kcpuset_destroy(affinity);
   5499 	return ENOMEM;
   5500 }
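
/*
 * Resulting MSI-X layout: vectors 0 .. sc_nqueues - 1 each handle one
 * TX/RX queue pair with round-robin CPU affinity, the last vector
 * (sc_link_intr_idx == sc_nqueues) handles link state changes, and
 * sc_nintrs == sc_nqueues + 1.
 */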
   5501 
   5502 static void
   5503 wm_unset_stopping_flags(struct wm_softc *sc)
   5504 {
   5505 	int i;
   5506 
   5507 	KASSERT(WM_CORE_LOCKED(sc));
   5508 
   5509 	/*
   5510 	 * must unset stopping flags in ascending order.
   5511 	 */
   5512 	for (i = 0; i < sc->sc_nqueues; i++) {
   5513 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5514 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5515 
   5516 		mutex_enter(txq->txq_lock);
   5517 		txq->txq_stopping = false;
   5518 		mutex_exit(txq->txq_lock);
   5519 
   5520 		mutex_enter(rxq->rxq_lock);
   5521 		rxq->rxq_stopping = false;
   5522 		mutex_exit(rxq->rxq_lock);
   5523 	}
   5524 
   5525 	sc->sc_core_stopping = false;
   5526 }
   5527 
   5528 static void
   5529 wm_set_stopping_flags(struct wm_softc *sc)
   5530 {
   5531 	int i;
   5532 
   5533 	KASSERT(WM_CORE_LOCKED(sc));
   5534 
   5535 	sc->sc_core_stopping = true;
   5536 
   5537 	/*
   5538 	 * must set stopping flags in ascending order.
   5539 	 */
   5540 	for (i = 0; i < sc->sc_nqueues; i++) {
   5541 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5542 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5543 
   5544 		mutex_enter(rxq->rxq_lock);
   5545 		rxq->rxq_stopping = true;
   5546 		mutex_exit(rxq->rxq_lock);
   5547 
   5548 		mutex_enter(txq->txq_lock);
   5549 		txq->txq_stopping = true;
   5550 		mutex_exit(txq->txq_lock);
   5551 	}
   5552 }
   5553 
   5554 /*
   5555  * write interrupt interval value to ITR or EITR
   5556  */
   5557 static void
   5558 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5559 {
   5560 
   5561 	if (!wmq->wmq_set_itr)
   5562 		return;
   5563 
   5564 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5565 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5566 
   5567 		/*
    5568 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5569 		 * the counter field in software.
   5570 		 */
   5571 		if (sc->sc_type == WM_T_82575)
   5572 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5573 		else
   5574 			eitr |= EITR_CNT_INGR;
   5575 
   5576 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5577 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5578 		/*
    5579 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5580 		 * the multiqueue function with MSI-X.
   5581 		 */
   5582 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5583 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5584 	} else {
   5585 		KASSERT(wmq->wmq_id == 0);
   5586 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5587 	}
   5588 
   5589 	wmq->wmq_set_itr = false;
   5590 }
   5591 
   5592 /*
   5593  * TODO
    5594  * The dynamic ITR calculation below is almost the same as Linux's igb,
    5595  * but it does not fit wm(4), so AIM stays disabled until we find an
    5596  * appropriate ITR calculation.
   5597  */
   5598 /*
    5599  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5600  * write. This function does not write the ITR/EITR register itself.
   5601  */
   5602 static void
   5603 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5604 {
   5605 #ifdef NOTYET
   5606 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5607 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5608 	uint32_t avg_size = 0;
   5609 	uint32_t new_itr;
   5610 
   5611 	if (rxq->rxq_packets)
   5612 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5613 	if (txq->txq_packets)
   5614 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5615 
   5616 	if (avg_size == 0) {
   5617 		new_itr = 450; /* restore default value */
   5618 		goto out;
   5619 	}
   5620 
   5621 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5622 	avg_size += 24;
   5623 
   5624 	/* Don't starve jumbo frames */
   5625 	avg_size = uimin(avg_size, 3000);
   5626 
   5627 	/* Give a little boost to mid-size frames */
   5628 	if ((avg_size > 300) && (avg_size < 1200))
   5629 		new_itr = avg_size / 3;
   5630 	else
   5631 		new_itr = avg_size / 2;
   5632 
   5633 out:
   5634 	/*
    5635 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5636 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5637 	 */
   5638 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5639 		new_itr *= 4;
   5640 
   5641 	if (new_itr != wmq->wmq_itr) {
   5642 		wmq->wmq_itr = new_itr;
   5643 		wmq->wmq_set_itr = true;
   5644 	} else
   5645 		wmq->wmq_set_itr = false;
   5646 
   5647 	rxq->rxq_packets = 0;
   5648 	rxq->rxq_bytes = 0;
   5649 	txq->txq_packets = 0;
   5650 	txq->txq_bytes = 0;
   5651 #endif
   5652 }
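
/*
 * Worked example of the (disabled) AIM calculation above: an average
 * frame of 576 bytes becomes 600 after the 24-byte CRC/preamble/gap
 * adjustment, which falls in the 300..1200 mid-size band, so
 * new_itr = 600 / 3 = 200, quadrupled to 800 on everything except
 * the 82575.
 */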
   5653 
   5654 /*
   5655  * wm_init:		[ifnet interface function]
   5656  *
   5657  *	Initialize the interface.
   5658  */
   5659 static int
   5660 wm_init(struct ifnet *ifp)
   5661 {
   5662 	struct wm_softc *sc = ifp->if_softc;
   5663 	int ret;
   5664 
   5665 	WM_CORE_LOCK(sc);
   5666 	ret = wm_init_locked(ifp);
   5667 	WM_CORE_UNLOCK(sc);
   5668 
   5669 	return ret;
   5670 }
   5671 
   5672 static int
   5673 wm_init_locked(struct ifnet *ifp)
   5674 {
   5675 	struct wm_softc *sc = ifp->if_softc;
   5676 	struct ethercom *ec = &sc->sc_ethercom;
   5677 	int i, j, trynum, error = 0;
   5678 	uint32_t reg;
   5679 
   5680 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5681 		device_xname(sc->sc_dev), __func__));
   5682 	KASSERT(WM_CORE_LOCKED(sc));
   5683 
   5684 	/*
    5685 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5686 	 * There is a small but measurable benefit to avoiding the adjustment
    5687 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5688 	 * on such platforms.  One possibility is that the DMA itself is
   5689 	 * slightly more efficient if the front of the entire packet (instead
   5690 	 * of the front of the headers) is aligned.
   5691 	 *
   5692 	 * Note we must always set align_tweak to 0 if we are using
   5693 	 * jumbo frames.
   5694 	 */
   5695 #ifdef __NO_STRICT_ALIGNMENT
   5696 	sc->sc_align_tweak = 0;
   5697 #else
   5698 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5699 		sc->sc_align_tweak = 0;
   5700 	else
   5701 		sc->sc_align_tweak = 2;
   5702 #endif /* __NO_STRICT_ALIGNMENT */
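
	/*
	 * With align_tweak == 2, the 14-byte Ethernet header ends on a
	 * 4-byte boundary (2 + 14 = 16), so the IP header that follows is
	 * naturally aligned on strict-alignment platforms; jumbo frames
	 * need the full cluster, so the tweak is dropped for them.
	 */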
   5703 
   5704 	/* Cancel any pending I/O. */
   5705 	wm_stop_locked(ifp, 0);
   5706 
   5707 	/* update statistics before reset */
   5708 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5709 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5710 
   5711 	/* PCH_SPT hardware workaround */
   5712 	if (sc->sc_type == WM_T_PCH_SPT)
   5713 		wm_flush_desc_rings(sc);
   5714 
   5715 	/* Reset the chip to a known state. */
   5716 	wm_reset(sc);
   5717 
   5718 	/*
    5719 	 * AMT-based hardware can now take control from firmware.
    5720 	 * Do this after reset.
   5721 	 */
   5722 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5723 		wm_get_hw_control(sc);
   5724 
   5725 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5726 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5727 		wm_legacy_irq_quirk_spt(sc);
   5728 
   5729 	/* Init hardware bits */
   5730 	wm_initialize_hardware_bits(sc);
   5731 
   5732 	/* Reset the PHY. */
   5733 	if (sc->sc_flags & WM_F_HAS_MII)
   5734 		wm_gmii_reset(sc);
   5735 
   5736 	if (sc->sc_type >= WM_T_ICH8) {
   5737 		reg = CSR_READ(sc, WMREG_GCR);
   5738 		/*
   5739 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5740 		 * default after reset.
   5741 		 */
   5742 		if (sc->sc_type == WM_T_ICH8)
   5743 			reg |= GCR_NO_SNOOP_ALL;
   5744 		else
   5745 			reg &= ~GCR_NO_SNOOP_ALL;
   5746 		CSR_WRITE(sc, WMREG_GCR, reg);
   5747 	}
   5748 	if ((sc->sc_type >= WM_T_ICH8)
   5749 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5750 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5752 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5753 		reg |= CTRL_EXT_RO_DIS;
   5754 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5755 	}
   5756 
   5757 	/* Calculate (E)ITR value */
   5758 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5759 		/*
   5760 		 * For NEWQUEUE's EITR (except for 82575).
    5761 		 * The 82575's EITR should be set to the same throttling
    5762 		 * value as other old controllers' ITR because the
    5763 		 * interrupts/sec calculation is the same, that is,
    5764 		 * 1,000,000,000 / (N * 256). The 82574's EITR should be
    5765 		 * set to the same throttling value as the ITR.
    5766 		 *
    5767 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5768 		 * in contrast to the ITR throttling value.
   5769 		 */
   5770 		sc->sc_itr_init = 450;
   5771 	} else if (sc->sc_type >= WM_T_82543) {
   5772 		/*
   5773 		 * Set up the interrupt throttling register (units of 256ns)
   5774 		 * Note that a footnote in Intel's documentation says this
   5775 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5776 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5777 		 * that that is also true for the 1024ns units of the other
   5778 		 * interrupt-related timer registers -- so, really, we ought
   5779 		 * to divide this value by 4 when the link speed is low.
   5780 		 *
   5781 		 * XXX implement this division at link speed change!
   5782 		 */
   5783 
   5784 		/*
   5785 		 * For N interrupts/sec, set this value to:
   5786 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5787 		 * absolute and packet timer values to this value
   5788 		 * divided by 4 to get "simple timer" behavior.
   5789 		 */
   5790 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5791 	}
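
	/*
	 * Sanity check of the two formulas above: sc_itr_init = 1500 in
	 * ITR's 256ns units gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec, while sc_itr_init = 450 under the NEWQUEUE EITR
	 * rule gives 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */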
   5792 
   5793 	error = wm_init_txrx_queues(sc);
   5794 	if (error)
   5795 		goto out;
   5796 
   5797 	/*
   5798 	 * Clear out the VLAN table -- we don't use it (yet).
   5799 	 */
   5800 	CSR_WRITE(sc, WMREG_VET, 0);
   5801 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5802 		trynum = 10; /* Due to hw errata */
   5803 	else
   5804 		trynum = 1;
   5805 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5806 		for (j = 0; j < trynum; j++)
   5807 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5808 
   5809 	/*
   5810 	 * Set up flow-control parameters.
   5811 	 *
   5812 	 * XXX Values could probably stand some tuning.
   5813 	 */
   5814 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5815 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5816 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5817 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5818 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5819 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5820 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5821 	}
   5822 
   5823 	sc->sc_fcrtl = FCRTL_DFLT;
   5824 	if (sc->sc_type < WM_T_82543) {
   5825 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5826 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5827 	} else {
   5828 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5829 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5830 	}
   5831 
   5832 	if (sc->sc_type == WM_T_80003)
   5833 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5834 	else
   5835 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5836 
   5837 	/* Writes the control register. */
   5838 	wm_set_vlan(sc);
   5839 
   5840 	if (sc->sc_flags & WM_F_HAS_MII) {
   5841 		uint16_t kmreg;
   5842 
   5843 		switch (sc->sc_type) {
   5844 		case WM_T_80003:
   5845 		case WM_T_ICH8:
   5846 		case WM_T_ICH9:
   5847 		case WM_T_ICH10:
   5848 		case WM_T_PCH:
   5849 		case WM_T_PCH2:
   5850 		case WM_T_PCH_LPT:
   5851 		case WM_T_PCH_SPT:
   5852 		case WM_T_PCH_CNP:
   5853 			/*
    5854 			 * Set the MAC to wait the maximum time between each
    5855 			 * iteration and increase the maximum number of
    5856 			 * iterations when polling the PHY; this fixes
    5857 			 * erroneous timeouts at 10Mbps.
   5858 			 */
   5859 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5860 			    0xFFFF);
   5861 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5862 			    &kmreg);
   5863 			kmreg |= 0x3F;
   5864 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5865 			    kmreg);
   5866 			break;
   5867 		default:
   5868 			break;
   5869 		}
   5870 
   5871 		if (sc->sc_type == WM_T_80003) {
   5872 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5873 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5874 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5875 
   5876 			/* Bypass RX and TX FIFO's */
   5877 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5878 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5879 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5880 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5881 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5882 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5883 		}
   5884 	}
   5885 #if 0
   5886 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5887 #endif
   5888 
   5889 	/* Set up checksum offload parameters. */
   5890 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5891 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5892 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5893 		reg |= RXCSUM_IPOFL;
   5894 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5895 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5896 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5897 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5898 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5899 
   5900 	/* Set registers about MSI-X */
   5901 	if (wm_is_using_msix(sc)) {
   5902 		uint32_t ivar;
   5903 		struct wm_queue *wmq;
   5904 		int qid, qintr_idx;
   5905 
   5906 		if (sc->sc_type == WM_T_82575) {
   5907 			/* Interrupt control */
   5908 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5909 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5910 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5911 
   5912 			/* TX and RX */
   5913 			for (i = 0; i < sc->sc_nqueues; i++) {
   5914 				wmq = &sc->sc_queue[i];
   5915 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5916 				    EITR_TX_QUEUE(wmq->wmq_id)
   5917 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5918 			}
   5919 			/* Link status */
   5920 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5921 			    EITR_OTHER);
   5922 		} else if (sc->sc_type == WM_T_82574) {
   5923 			/* Interrupt control */
   5924 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5925 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5926 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5927 
   5928 			/*
    5929 			 * Work around an issue with spurious interrupts
    5930 			 * in MSI-X mode.
    5931 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5932 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5933 			 */
   5934 			reg = CSR_READ(sc, WMREG_RFCTL);
   5935 			reg |= WMREG_RFCTL_ACKDIS;
   5936 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5937 
   5938 			ivar = 0;
   5939 			/* TX and RX */
   5940 			for (i = 0; i < sc->sc_nqueues; i++) {
   5941 				wmq = &sc->sc_queue[i];
   5942 				qid = wmq->wmq_id;
   5943 				qintr_idx = wmq->wmq_intr_idx;
   5944 
   5945 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5946 				    IVAR_TX_MASK_Q_82574(qid));
   5947 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5948 				    IVAR_RX_MASK_Q_82574(qid));
   5949 			}
   5950 			/* Link status */
   5951 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5952 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5953 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5954 		} else {
   5955 			/* Interrupt control */
   5956 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5957 			    | GPIE_EIAME | GPIE_PBA);
   5958 
   5959 			switch (sc->sc_type) {
   5960 			case WM_T_82580:
   5961 			case WM_T_I350:
   5962 			case WM_T_I354:
   5963 			case WM_T_I210:
   5964 			case WM_T_I211:
   5965 				/* TX and RX */
   5966 				for (i = 0; i < sc->sc_nqueues; i++) {
   5967 					wmq = &sc->sc_queue[i];
   5968 					qid = wmq->wmq_id;
   5969 					qintr_idx = wmq->wmq_intr_idx;
   5970 
   5971 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5972 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5973 					ivar |= __SHIFTIN((qintr_idx
   5974 						| IVAR_VALID),
   5975 					    IVAR_TX_MASK_Q(qid));
   5976 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5977 					ivar |= __SHIFTIN((qintr_idx
   5978 						| IVAR_VALID),
   5979 					    IVAR_RX_MASK_Q(qid));
   5980 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5981 				}
   5982 				break;
   5983 			case WM_T_82576:
   5984 				/* TX and RX */
   5985 				for (i = 0; i < sc->sc_nqueues; i++) {
   5986 					wmq = &sc->sc_queue[i];
   5987 					qid = wmq->wmq_id;
   5988 					qintr_idx = wmq->wmq_intr_idx;
   5989 
   5990 					ivar = CSR_READ(sc,
   5991 					    WMREG_IVAR_Q_82576(qid));
   5992 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5993 					ivar |= __SHIFTIN((qintr_idx
   5994 						| IVAR_VALID),
   5995 					    IVAR_TX_MASK_Q_82576(qid));
   5996 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5997 					ivar |= __SHIFTIN((qintr_idx
   5998 						| IVAR_VALID),
   5999 					    IVAR_RX_MASK_Q_82576(qid));
   6000 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6001 					    ivar);
   6002 				}
   6003 				break;
   6004 			default:
   6005 				break;
   6006 			}
   6007 
   6008 			/* Link status */
   6009 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6010 			    IVAR_MISC_OTHER);
   6011 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6012 		}
   6013 
   6014 		if (wm_is_using_multiqueue(sc)) {
   6015 			wm_init_rss(sc);
   6016 
    6017 			/*
    6018 			 * NOTE: Receive Full-Packet Checksum Offload is
    6019 			 * mutually exclusive with multiqueue. However,
    6020 			 * this is not the same as TCP/IP checksums, which
    6021 			 * still work.
    6022 			 */
   6023 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6024 			reg |= RXCSUM_PCSD;
   6025 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6026 		}
   6027 	}
   6028 
   6029 	/* Set up the interrupt registers. */
   6030 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6031 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6032 	    ICR_RXO | ICR_RXT0;
   6033 	if (wm_is_using_msix(sc)) {
   6034 		uint32_t mask;
   6035 		struct wm_queue *wmq;
   6036 
   6037 		switch (sc->sc_type) {
   6038 		case WM_T_82574:
   6039 			mask = 0;
   6040 			for (i = 0; i < sc->sc_nqueues; i++) {
   6041 				wmq = &sc->sc_queue[i];
   6042 				mask |= ICR_TXQ(wmq->wmq_id);
   6043 				mask |= ICR_RXQ(wmq->wmq_id);
   6044 			}
   6045 			mask |= ICR_OTHER;
   6046 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6047 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6048 			break;
   6049 		default:
   6050 			if (sc->sc_type == WM_T_82575) {
   6051 				mask = 0;
   6052 				for (i = 0; i < sc->sc_nqueues; i++) {
   6053 					wmq = &sc->sc_queue[i];
   6054 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6055 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6056 				}
   6057 				mask |= EITR_OTHER;
   6058 			} else {
   6059 				mask = 0;
   6060 				for (i = 0; i < sc->sc_nqueues; i++) {
   6061 					wmq = &sc->sc_queue[i];
   6062 					mask |= 1 << wmq->wmq_intr_idx;
   6063 				}
   6064 				mask |= 1 << sc->sc_link_intr_idx;
   6065 			}
   6066 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6067 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6068 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6069 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6070 			break;
   6071 		}
   6072 	} else
   6073 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
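
	/*
	 * Example of the MSI-X mask above: on an 82576 with two queues on
	 * vectors 0 and 1 and the link interrupt on vector 2, mask ends up
	 * as 0x7, which is then written to EIAC (auto-clear), EIAM
	 * (auto-mask) and EIMS (interrupt enable) for those three vectors.
	 */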
   6074 
   6075 	/* Set up the inter-packet gap. */
   6076 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6077 
   6078 	if (sc->sc_type >= WM_T_82543) {
   6079 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6080 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6081 			wm_itrs_writereg(sc, wmq);
   6082 		}
   6083 		/*
    6084 		 * Link interrupts occur much less frequently than TX
    6085 		 * and RX interrupts, so we don't tune the
    6086 		 * EINTR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6087 		 * if_igb does.
   6088 		 */
   6089 	}
   6090 
   6091 	/* Set the VLAN ethernetype. */
   6092 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6093 
   6094 	/*
   6095 	 * Set up the transmit control register; we start out with
    6096 	 * a collision distance suitable for FDX, but update it when
   6097 	 * we resolve the media type.
   6098 	 */
   6099 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6100 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6101 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6102 	if (sc->sc_type >= WM_T_82571)
   6103 		sc->sc_tctl |= TCTL_MULR;
   6104 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6105 
   6106 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6107 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6108 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6109 	}
   6110 
   6111 	if (sc->sc_type == WM_T_80003) {
   6112 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6113 		reg &= ~TCTL_EXT_GCEX_MASK;
   6114 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6115 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6116 	}
   6117 
   6118 	/* Set the media. */
   6119 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6120 		goto out;
   6121 
   6122 	/* Configure for OS presence */
   6123 	wm_init_manageability(sc);
   6124 
   6125 	/*
   6126 	 * Set up the receive control register; we actually program the
   6127 	 * register when we set the receive filter. Use multicast address
   6128 	 * offset type 0.
   6129 	 *
   6130 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6131 	 * don't enable that feature.
   6132 	 */
   6133 	sc->sc_mchash_type = 0;
   6134 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6135 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6136 
   6137 	/*
    6138 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6139 	 */
   6140 	if (sc->sc_type == WM_T_82574)
   6141 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6142 
   6143 	/*
   6144 	 * The I350 has a bug where it always strips the CRC whether
    6145 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6146 	 */
   6147 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6148 	    || (sc->sc_type == WM_T_I210))
   6149 		sc->sc_rctl |= RCTL_SECRC;
   6150 
   6151 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6152 	    && (ifp->if_mtu > ETHERMTU)) {
   6153 		sc->sc_rctl |= RCTL_LPE;
   6154 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6155 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6156 	}
   6157 
   6158 	if (MCLBYTES == 2048)
   6159 		sc->sc_rctl |= RCTL_2k;
   6160 	else {
   6161 		if (sc->sc_type >= WM_T_82543) {
   6162 			switch (MCLBYTES) {
   6163 			case 4096:
   6164 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6165 				break;
   6166 			case 8192:
   6167 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6168 				break;
   6169 			case 16384:
   6170 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6171 				break;
   6172 			default:
   6173 				panic("wm_init: MCLBYTES %d unsupported",
   6174 				    MCLBYTES);
   6175 				break;
   6176 			}
   6177 		} else
   6178 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6179 	}
   6180 
   6181 	/* Enable ECC */
   6182 	switch (sc->sc_type) {
   6183 	case WM_T_82571:
   6184 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6185 		reg |= PBA_ECC_CORR_EN;
   6186 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6187 		break;
   6188 	case WM_T_PCH_LPT:
   6189 	case WM_T_PCH_SPT:
   6190 	case WM_T_PCH_CNP:
   6191 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6192 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6193 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6194 
   6195 		sc->sc_ctrl |= CTRL_MEHE;
   6196 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6197 		break;
   6198 	default:
   6199 		break;
   6200 	}
   6201 
   6202 	/*
   6203 	 * Set the receive filter.
   6204 	 *
   6205 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6206 	 * the setting of RCTL.EN in wm_set_filter()
   6207 	 */
   6208 	wm_set_filter(sc);
   6209 
    6210 	/* On 82575 and later, set RDT only if RX is enabled */
   6211 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6212 		int qidx;
   6213 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6214 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6215 			for (i = 0; i < WM_NRXDESC; i++) {
   6216 				mutex_enter(rxq->rxq_lock);
   6217 				wm_init_rxdesc(rxq, i);
   6218 				mutex_exit(rxq->rxq_lock);
   6219 
   6220 			}
   6221 		}
   6222 	}
   6223 
   6224 	wm_unset_stopping_flags(sc);
   6225 
   6226 	/* Start the one second link check clock. */
   6227 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6228 
   6229 	/* ...all done! */
   6230 	ifp->if_flags |= IFF_RUNNING;
   6231 	ifp->if_flags &= ~IFF_OACTIVE;
   6232 
   6233  out:
   6234 	/* Save last flags for the callback */
   6235 	sc->sc_if_flags = ifp->if_flags;
   6236 	sc->sc_ec_capenable = ec->ec_capenable;
   6237 	if (error)
   6238 		log(LOG_ERR, "%s: interface not running\n",
   6239 		    device_xname(sc->sc_dev));
   6240 	return error;
   6241 }
   6242 
   6243 /*
   6244  * wm_stop:		[ifnet interface function]
   6245  *
   6246  *	Stop transmission on the interface.
   6247  */
   6248 static void
   6249 wm_stop(struct ifnet *ifp, int disable)
   6250 {
   6251 	struct wm_softc *sc = ifp->if_softc;
   6252 
   6253 	WM_CORE_LOCK(sc);
   6254 	wm_stop_locked(ifp, disable);
   6255 	WM_CORE_UNLOCK(sc);
   6256 }
   6257 
   6258 static void
   6259 wm_stop_locked(struct ifnet *ifp, int disable)
   6260 {
   6261 	struct wm_softc *sc = ifp->if_softc;
   6262 	struct wm_txsoft *txs;
   6263 	int i, qidx;
   6264 
   6265 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6266 		device_xname(sc->sc_dev), __func__));
   6267 	KASSERT(WM_CORE_LOCKED(sc));
   6268 
   6269 	wm_set_stopping_flags(sc);
   6270 
   6271 	/* Stop the one second clock. */
   6272 	callout_stop(&sc->sc_tick_ch);
   6273 
   6274 	/* Stop the 82547 Tx FIFO stall check timer. */
   6275 	if (sc->sc_type == WM_T_82547)
   6276 		callout_stop(&sc->sc_txfifo_ch);
   6277 
   6278 	if (sc->sc_flags & WM_F_HAS_MII) {
   6279 		/* Down the MII. */
   6280 		mii_down(&sc->sc_mii);
   6281 	} else {
   6282 #if 0
   6283 		/* Should we clear PHY's status properly? */
   6284 		wm_reset(sc);
   6285 #endif
   6286 	}
   6287 
   6288 	/* Stop the transmit and receive processes. */
   6289 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6290 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6291 	sc->sc_rctl &= ~RCTL_EN;
   6292 
   6293 	/*
   6294 	 * Clear the interrupt mask to ensure the device cannot assert its
   6295 	 * interrupt line.
   6296 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6297 	 * service any currently pending or shared interrupt.
   6298 	 */
   6299 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6300 	sc->sc_icr = 0;
   6301 	if (wm_is_using_msix(sc)) {
   6302 		if (sc->sc_type != WM_T_82574) {
   6303 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6304 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6305 		} else
   6306 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6307 	}
   6308 
   6309 	/* Release any queued transmit buffers. */
   6310 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6311 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6312 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6313 		mutex_enter(txq->txq_lock);
   6314 		txq->txq_sending = false; /* ensure watchdog disabled */
   6315 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6316 			txs = &txq->txq_soft[i];
   6317 			if (txs->txs_mbuf != NULL) {
    6318 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6319 				m_freem(txs->txs_mbuf);
   6320 				txs->txs_mbuf = NULL;
   6321 			}
   6322 		}
   6323 		mutex_exit(txq->txq_lock);
   6324 	}
   6325 
   6326 	/* Mark the interface as down and cancel the watchdog timer. */
   6327 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6328 
   6329 	if (disable) {
   6330 		for (i = 0; i < sc->sc_nqueues; i++) {
   6331 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6332 			mutex_enter(rxq->rxq_lock);
   6333 			wm_rxdrain(rxq);
   6334 			mutex_exit(rxq->rxq_lock);
   6335 		}
   6336 	}
   6337 
   6338 #if 0 /* notyet */
   6339 	if (sc->sc_type >= WM_T_82544)
   6340 		CSR_WRITE(sc, WMREG_WUC, 0);
   6341 #endif
   6342 }
   6343 
   6344 static void
   6345 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6346 {
   6347 	struct mbuf *m;
   6348 	int i;
   6349 
   6350 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6351 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6352 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6353 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6354 		    m->m_data, m->m_len, m->m_flags);
   6355 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6356 	    i, i == 1 ? "" : "s");
   6357 }
   6358 
   6359 /*
   6360  * wm_82547_txfifo_stall:
   6361  *
   6362  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6363  *	reset the FIFO pointers, and restart packet transmission.
   6364  */
   6365 static void
   6366 wm_82547_txfifo_stall(void *arg)
   6367 {
   6368 	struct wm_softc *sc = arg;
   6369 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6370 
   6371 	mutex_enter(txq->txq_lock);
   6372 
   6373 	if (txq->txq_stopping)
   6374 		goto out;
   6375 
   6376 	if (txq->txq_fifo_stall) {
   6377 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6378 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6379 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6380 			/*
   6381 			 * Packets have drained.  Stop transmitter, reset
   6382 			 * FIFO pointers, restart transmitter, and kick
   6383 			 * the packet queue.
   6384 			 */
   6385 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6386 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6387 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6388 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6389 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6390 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6391 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6392 			CSR_WRITE_FLUSH(sc);
   6393 
   6394 			txq->txq_fifo_head = 0;
   6395 			txq->txq_fifo_stall = 0;
   6396 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6397 		} else {
   6398 			/*
   6399 			 * Still waiting for packets to drain; try again in
   6400 			 * another tick.
   6401 			 */
   6402 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6403 		}
   6404 	}
   6405 
   6406 out:
   6407 	mutex_exit(txq->txq_lock);
   6408 }
   6409 
   6410 /*
   6411  * wm_82547_txfifo_bugchk:
   6412  *
   6413  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6414  *	prevent enqueueing a packet that would wrap around the end
    6415  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6416  *
   6417  *	We do this by checking the amount of space before the end
   6418  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6419  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6420  *	the internal FIFO pointers to the beginning, and restart
   6421  *	transmission on the interface.
   6422  */
   6423 #define	WM_FIFO_HDR		0x10
   6424 #define	WM_82547_PAD_LEN	0x3e0
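         /*
          * Worked example (illustrative, not driver code): suppose the
          * Tx FIFO holds 0x2000 bytes and txq_fifo_head is 0x1e00, so
          * space = 0x200.  A 1514-byte frame rounds up to
          * len = roundup(1514 + 0x10, 0x10) = 0x600, and since
          * 0x600 >= WM_82547_PAD_LEN + 0x200 (= 0x5e0), the packet is
          * held back and the drain callout is scheduled.
          */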
   6425 static int
   6426 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6427 {
   6428 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6429 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6430 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6431 
   6432 	/* Just return if already stalled. */
   6433 	if (txq->txq_fifo_stall)
   6434 		return 1;
   6435 
   6436 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6437 		/* Stall only occurs in half-duplex mode. */
   6438 		goto send_packet;
   6439 	}
   6440 
   6441 	if (len >= WM_82547_PAD_LEN + space) {
   6442 		txq->txq_fifo_stall = 1;
   6443 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6444 		return 1;
   6445 	}
   6446 
   6447  send_packet:
   6448 	txq->txq_fifo_head += len;
   6449 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6450 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6451 
   6452 	return 0;
   6453 }
   6454 
   6455 static int
   6456 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6457 {
   6458 	int error;
   6459 
   6460 	/*
   6461 	 * Allocate the control data structures, and create and load the
   6462 	 * DMA map for it.
   6463 	 *
   6464 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6465 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6466 	 * both sets within the same 4G segment.
   6467 	 */
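         	/*
         	 * The 4G constraint is enforced below via the boundary
         	 * argument to bus_dmamem_alloc(): passing 0x100000000
         	 * guarantees the allocation does not cross a 4GB boundary.
         	 */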
   6468 	if (sc->sc_type < WM_T_82544)
   6469 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6470 	else
   6471 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6472 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6473 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6474 	else
   6475 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6476 
   6477 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6478 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6479 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6480 		aprint_error_dev(sc->sc_dev,
   6481 		    "unable to allocate TX control data, error = %d\n",
   6482 		    error);
   6483 		goto fail_0;
   6484 	}
   6485 
   6486 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6487 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6488 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6489 		aprint_error_dev(sc->sc_dev,
   6490 		    "unable to map TX control data, error = %d\n", error);
   6491 		goto fail_1;
   6492 	}
   6493 
   6494 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6495 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6496 		aprint_error_dev(sc->sc_dev,
   6497 		    "unable to create TX control data DMA map, error = %d\n",
   6498 		    error);
   6499 		goto fail_2;
   6500 	}
   6501 
   6502 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6503 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6504 		aprint_error_dev(sc->sc_dev,
   6505 		    "unable to load TX control data DMA map, error = %d\n",
   6506 		    error);
   6507 		goto fail_3;
   6508 	}
   6509 
   6510 	return 0;
   6511 
   6512  fail_3:
   6513 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6514  fail_2:
   6515 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6516 	    WM_TXDESCS_SIZE(txq));
   6517  fail_1:
   6518 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6519  fail_0:
   6520 	return error;
   6521 }
   6522 
   6523 static void
   6524 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6525 {
   6526 
   6527 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6528 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6529 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6530 	    WM_TXDESCS_SIZE(txq));
   6531 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6532 }
   6533 
   6534 static int
   6535 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6536 {
   6537 	int error;
   6538 	size_t rxq_descs_size;
   6539 
   6540 	/*
   6541 	 * Allocate the control data structures, and create and load the
   6542 	 * DMA map for it.
   6543 	 *
   6544 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6545 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6546 	 * both sets within the same 4G segment.
   6547 	 */
   6548 	rxq->rxq_ndesc = WM_NRXDESC;
   6549 	if (sc->sc_type == WM_T_82574)
   6550 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6551 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6552 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6553 	else
   6554 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6555 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
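         	/*
         	 * Illustrative sizing (assuming WM_NRXDESC is 256 and a
         	 * 16-byte descriptor format): rxq_descs_size would be
         	 * 256 * 16 = 4096 bytes, i.e. a single page on most ports.
         	 */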
   6556 
   6557 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6558 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6559 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6560 		aprint_error_dev(sc->sc_dev,
   6561 		    "unable to allocate RX control data, error = %d\n",
   6562 		    error);
   6563 		goto fail_0;
   6564 	}
   6565 
   6566 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6567 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6568 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6569 		aprint_error_dev(sc->sc_dev,
   6570 		    "unable to map RX control data, error = %d\n", error);
   6571 		goto fail_1;
   6572 	}
   6573 
   6574 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6575 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6576 		aprint_error_dev(sc->sc_dev,
   6577 		    "unable to create RX control data DMA map, error = %d\n",
   6578 		    error);
   6579 		goto fail_2;
   6580 	}
   6581 
   6582 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6583 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6584 		aprint_error_dev(sc->sc_dev,
   6585 		    "unable to load RX control data DMA map, error = %d\n",
   6586 		    error);
   6587 		goto fail_3;
   6588 	}
   6589 
   6590 	return 0;
   6591 
   6592  fail_3:
   6593 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6594  fail_2:
   6595 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6596 	    rxq_descs_size);
   6597  fail_1:
   6598 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6599  fail_0:
   6600 	return error;
   6601 }
   6602 
   6603 static void
   6604 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6605 {
   6606 
   6607 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6608 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6609 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6610 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6611 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6612 }
   6613 
   6614 
   6615 static int
   6616 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6617 {
   6618 	int i, error;
   6619 
   6620 	/* Create the transmit buffer DMA maps. */
   6621 	WM_TXQUEUELEN(txq) =
   6622 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6623 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6624 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6625 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6626 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6627 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6628 			aprint_error_dev(sc->sc_dev,
   6629 			    "unable to create Tx DMA map %d, error = %d\n",
   6630 			    i, error);
   6631 			goto fail;
   6632 		}
   6633 	}
   6634 
   6635 	return 0;
   6636 
   6637  fail:
   6638 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6639 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6640 			bus_dmamap_destroy(sc->sc_dmat,
   6641 			    txq->txq_soft[i].txs_dmamap);
   6642 	}
   6643 	return error;
   6644 }
   6645 
   6646 static void
   6647 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6648 {
   6649 	int i;
   6650 
   6651 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6652 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6653 			bus_dmamap_destroy(sc->sc_dmat,
   6654 			    txq->txq_soft[i].txs_dmamap);
   6655 	}
   6656 }
   6657 
   6658 static int
   6659 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6660 {
   6661 	int i, error;
   6662 
   6663 	/* Create the receive buffer DMA maps. */
   6664 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6665 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6666 			    MCLBYTES, 0, 0,
   6667 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6668 			aprint_error_dev(sc->sc_dev,
   6669 			    "unable to create Rx DMA map %d error = %d\n",
   6670 			    i, error);
   6671 			goto fail;
   6672 		}
   6673 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6674 	}
   6675 
   6676 	return 0;
   6677 
   6678  fail:
   6679 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6680 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6681 			bus_dmamap_destroy(sc->sc_dmat,
   6682 			    rxq->rxq_soft[i].rxs_dmamap);
   6683 	}
   6684 	return error;
   6685 }
   6686 
   6687 static void
   6688 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6689 {
   6690 	int i;
   6691 
   6692 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6693 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6694 			bus_dmamap_destroy(sc->sc_dmat,
   6695 			    rxq->rxq_soft[i].rxs_dmamap);
   6696 	}
   6697 }
   6698 
   6699 /*
    6700  * wm_alloc_txrx_queues:
   6701  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6702  */
   6703 static int
   6704 wm_alloc_txrx_queues(struct wm_softc *sc)
   6705 {
   6706 	int i, error, tx_done, rx_done;
   6707 
   6708 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6709 	    KM_SLEEP);
   6710 	if (sc->sc_queue == NULL) {
    6711 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6712 		error = ENOMEM;
   6713 		goto fail_0;
   6714 	}
   6715 
   6716 	/*
   6717 	 * For transmission
   6718 	 */
   6719 	error = 0;
   6720 	tx_done = 0;
   6721 	for (i = 0; i < sc->sc_nqueues; i++) {
   6722 #ifdef WM_EVENT_COUNTERS
   6723 		int j;
   6724 		const char *xname;
   6725 #endif
   6726 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6727 		txq->txq_sc = sc;
   6728 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6729 
   6730 		error = wm_alloc_tx_descs(sc, txq);
   6731 		if (error)
   6732 			break;
   6733 		error = wm_alloc_tx_buffer(sc, txq);
   6734 		if (error) {
   6735 			wm_free_tx_descs(sc, txq);
   6736 			break;
   6737 		}
   6738 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6739 		if (txq->txq_interq == NULL) {
   6740 			wm_free_tx_descs(sc, txq);
   6741 			wm_free_tx_buffer(sc, txq);
   6742 			error = ENOMEM;
   6743 			break;
   6744 		}
   6745 
   6746 #ifdef WM_EVENT_COUNTERS
   6747 		xname = device_xname(sc->sc_dev);
   6748 
   6749 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6750 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6751 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6752 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6753 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6754 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6755 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6756 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6757 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6758 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6759 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6760 
   6761 		for (j = 0; j < WM_NTXSEGS; j++) {
   6762 			snprintf(txq->txq_txseg_evcnt_names[j],
   6763 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6764 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6765 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6766 		}
   6767 
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6772 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6773 #endif /* WM_EVENT_COUNTERS */
   6774 
   6775 		tx_done++;
   6776 	}
   6777 	if (error)
   6778 		goto fail_1;
   6779 
   6780 	/*
    6781 	 * For receive
   6782 	 */
   6783 	error = 0;
   6784 	rx_done = 0;
   6785 	for (i = 0; i < sc->sc_nqueues; i++) {
   6786 #ifdef WM_EVENT_COUNTERS
   6787 		const char *xname;
   6788 #endif
   6789 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6790 		rxq->rxq_sc = sc;
   6791 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6792 
   6793 		error = wm_alloc_rx_descs(sc, rxq);
   6794 		if (error)
   6795 			break;
   6796 
   6797 		error = wm_alloc_rx_buffer(sc, rxq);
   6798 		if (error) {
   6799 			wm_free_rx_descs(sc, rxq);
   6800 			break;
   6801 		}
   6802 
   6803 #ifdef WM_EVENT_COUNTERS
   6804 		xname = device_xname(sc->sc_dev);
   6805 
   6806 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6807 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6808 
   6809 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6810 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6811 #endif /* WM_EVENT_COUNTERS */
   6812 
   6813 		rx_done++;
   6814 	}
   6815 	if (error)
   6816 		goto fail_2;
   6817 
   6818 	return 0;
   6819 
   6820  fail_2:
   6821 	for (i = 0; i < rx_done; i++) {
   6822 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6823 		wm_free_rx_buffer(sc, rxq);
   6824 		wm_free_rx_descs(sc, rxq);
   6825 		if (rxq->rxq_lock)
   6826 			mutex_obj_free(rxq->rxq_lock);
   6827 	}
   6828  fail_1:
   6829 	for (i = 0; i < tx_done; i++) {
   6830 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6831 		pcq_destroy(txq->txq_interq);
   6832 		wm_free_tx_buffer(sc, txq);
   6833 		wm_free_tx_descs(sc, txq);
   6834 		if (txq->txq_lock)
   6835 			mutex_obj_free(txq->txq_lock);
   6836 	}
   6837 
   6838 	kmem_free(sc->sc_queue,
   6839 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6840  fail_0:
   6841 	return error;
   6842 }
   6843 
   6844 /*
    6845  * wm_free_txrx_queues:
   6846  *	Free {tx,rx}descs and {tx,rx} buffers
   6847  */
   6848 static void
   6849 wm_free_txrx_queues(struct wm_softc *sc)
   6850 {
   6851 	int i;
   6852 
   6853 	for (i = 0; i < sc->sc_nqueues; i++) {
   6854 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6855 
   6856 #ifdef WM_EVENT_COUNTERS
   6857 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6858 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6859 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6860 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6861 #endif /* WM_EVENT_COUNTERS */
   6862 
   6863 		wm_free_rx_buffer(sc, rxq);
   6864 		wm_free_rx_descs(sc, rxq);
   6865 		if (rxq->rxq_lock)
   6866 			mutex_obj_free(rxq->rxq_lock);
   6867 	}
   6868 
   6869 	for (i = 0; i < sc->sc_nqueues; i++) {
   6870 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6871 		struct mbuf *m;
   6872 #ifdef WM_EVENT_COUNTERS
   6873 		int j;
   6874 
   6875 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6876 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6877 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6878 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6879 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6880 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6881 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6882 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6884 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6885 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6886 
   6887 		for (j = 0; j < WM_NTXSEGS; j++)
   6888 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6889 
   6890 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6895 #endif /* WM_EVENT_COUNTERS */
   6896 
   6897 		/* drain txq_interq */
   6898 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6899 			m_freem(m);
   6900 		pcq_destroy(txq->txq_interq);
   6901 
   6902 		wm_free_tx_buffer(sc, txq);
   6903 		wm_free_tx_descs(sc, txq);
   6904 		if (txq->txq_lock)
   6905 			mutex_obj_free(txq->txq_lock);
   6906 	}
   6907 
   6908 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6909 }
   6910 
   6911 static void
   6912 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6913 {
   6914 
   6915 	KASSERT(mutex_owned(txq->txq_lock));
   6916 
   6917 	/* Initialize the transmit descriptor ring. */
   6918 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6919 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6920 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6921 	txq->txq_free = WM_NTXDESC(txq);
   6922 	txq->txq_next = 0;
   6923 }
   6924 
   6925 static void
   6926 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6927     struct wm_txqueue *txq)
   6928 {
   6929 
   6930 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6931 		device_xname(sc->sc_dev), __func__));
   6932 	KASSERT(mutex_owned(txq->txq_lock));
   6933 
   6934 	if (sc->sc_type < WM_T_82543) {
   6935 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6936 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6937 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6938 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6939 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6940 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6941 	} else {
   6942 		int qid = wmq->wmq_id;
   6943 
   6944 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6945 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6946 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6947 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6948 
   6949 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6950 			/*
   6951 			 * Don't write TDT before TCTL.EN is set.
    6952 			 * See the documentation.
   6953 			 */
   6954 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6955 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6956 			    | TXDCTL_WTHRESH(0));
   6957 		else {
   6958 			/* XXX should update with AIM? */
   6959 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6960 			if (sc->sc_type >= WM_T_82540) {
    6961 				/* Should be the same value as TIDV */
   6962 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6963 			}
   6964 
   6965 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6966 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6967 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6968 		}
   6969 	}
   6970 }
   6971 
   6972 static void
   6973 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6974 {
   6975 	int i;
   6976 
   6977 	KASSERT(mutex_owned(txq->txq_lock));
   6978 
   6979 	/* Initialize the transmit job descriptors. */
   6980 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6981 		txq->txq_soft[i].txs_mbuf = NULL;
   6982 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6983 	txq->txq_snext = 0;
   6984 	txq->txq_sdirty = 0;
   6985 }
   6986 
   6987 static void
   6988 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6989     struct wm_txqueue *txq)
   6990 {
   6991 
   6992 	KASSERT(mutex_owned(txq->txq_lock));
   6993 
   6994 	/*
   6995 	 * Set up some register offsets that are different between
   6996 	 * the i82542 and the i82543 and later chips.
   6997 	 */
   6998 	if (sc->sc_type < WM_T_82543)
   6999 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7000 	else
   7001 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7002 
   7003 	wm_init_tx_descs(sc, txq);
   7004 	wm_init_tx_regs(sc, wmq, txq);
   7005 	wm_init_tx_buffer(sc, txq);
   7006 
   7007 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7008 	txq->txq_sending = false;
   7009 }
   7010 
   7011 static void
   7012 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7013     struct wm_rxqueue *rxq)
   7014 {
   7015 
   7016 	KASSERT(mutex_owned(rxq->rxq_lock));
   7017 
   7018 	/*
   7019 	 * Initialize the receive descriptor and receive job
   7020 	 * descriptor rings.
   7021 	 */
   7022 	if (sc->sc_type < WM_T_82543) {
   7023 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7024 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7025 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7026 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7027 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7028 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7029 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7030 
   7031 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7032 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7033 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7034 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7035 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7036 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7037 	} else {
   7038 		int qid = wmq->wmq_id;
   7039 
   7040 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7041 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7042 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7043 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7044 
   7045 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7046 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7047 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7048 
    7049 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7050 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7051 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
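         			/*
         			 * Illustrative: BSIZEPKT counts in 1KB units
         			 * (assuming SRRCTL_BSIZEPKT_SHIFT is 10), so
         			 * the write above programs 2 for the usual
         			 * MCLBYTES of 2048.
         			 */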
   7052 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7053 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7054 			    | RXDCTL_WTHRESH(1));
   7055 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7056 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7057 		} else {
   7058 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7059 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7060 			/* XXX should update with AIM? */
   7061 			CSR_WRITE(sc, WMREG_RDTR,
   7062 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7063 			/* MUST be the same value as RDTR */
   7064 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7065 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7066 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7067 		}
   7068 	}
   7069 }
   7070 
   7071 static int
   7072 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7073 {
   7074 	struct wm_rxsoft *rxs;
   7075 	int error, i;
   7076 
   7077 	KASSERT(mutex_owned(rxq->rxq_lock));
   7078 
   7079 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7080 		rxs = &rxq->rxq_soft[i];
   7081 		if (rxs->rxs_mbuf == NULL) {
   7082 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7083 				log(LOG_ERR, "%s: unable to allocate or map "
   7084 				    "rx buffer %d, error = %d\n",
   7085 				    device_xname(sc->sc_dev), i, error);
   7086 				/*
   7087 				 * XXX Should attempt to run with fewer receive
   7088 				 * XXX buffers instead of just failing.
   7089 				 */
   7090 				wm_rxdrain(rxq);
   7091 				return ENOMEM;
   7092 			}
   7093 		} else {
   7094 			/*
   7095 			 * For 82575 and 82576, the RX descriptors must be
   7096 			 * initialized after the setting of RCTL.EN in
   7097 			 * wm_set_filter()
   7098 			 */
   7099 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7100 				wm_init_rxdesc(rxq, i);
   7101 		}
   7102 	}
   7103 	rxq->rxq_ptr = 0;
   7104 	rxq->rxq_discard = 0;
   7105 	WM_RXCHAIN_RESET(rxq);
   7106 
   7107 	return 0;
   7108 }
   7109 
   7110 static int
   7111 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7112     struct wm_rxqueue *rxq)
   7113 {
   7114 
   7115 	KASSERT(mutex_owned(rxq->rxq_lock));
   7116 
   7117 	/*
   7118 	 * Set up some register offsets that are different between
   7119 	 * the i82542 and the i82543 and later chips.
   7120 	 */
   7121 	if (sc->sc_type < WM_T_82543)
   7122 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7123 	else
   7124 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7125 
   7126 	wm_init_rx_regs(sc, wmq, rxq);
   7127 	return wm_init_rx_buffer(sc, rxq);
   7128 }
   7129 
   7130 /*
    7131  * wm_init_txrx_queues:
   7132  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7133  */
   7134 static int
   7135 wm_init_txrx_queues(struct wm_softc *sc)
   7136 {
   7137 	int i, error = 0;
   7138 
   7139 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7140 		device_xname(sc->sc_dev), __func__));
   7141 
   7142 	for (i = 0; i < sc->sc_nqueues; i++) {
   7143 		struct wm_queue *wmq = &sc->sc_queue[i];
   7144 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7145 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7146 
   7147 		/*
   7148 		 * TODO
    7149 		 * Currently, a constant value is used instead of AIM.
    7150 		 * Furthermore, the interrupt interval for multiqueue (which
    7151 		 * uses polling mode) is less than the default value.
   7152 		 * More tuning and AIM are required.
   7153 		 */
   7154 		if (wm_is_using_multiqueue(sc))
   7155 			wmq->wmq_itr = 50;
   7156 		else
   7157 			wmq->wmq_itr = sc->sc_itr_init;
   7158 		wmq->wmq_set_itr = true;
   7159 
   7160 		mutex_enter(txq->txq_lock);
   7161 		wm_init_tx_queue(sc, wmq, txq);
   7162 		mutex_exit(txq->txq_lock);
   7163 
   7164 		mutex_enter(rxq->rxq_lock);
   7165 		error = wm_init_rx_queue(sc, wmq, rxq);
   7166 		mutex_exit(rxq->rxq_lock);
   7167 		if (error)
   7168 			break;
   7169 	}
   7170 
   7171 	return error;
   7172 }
   7173 
   7174 /*
   7175  * wm_tx_offload:
   7176  *
   7177  *	Set up TCP/IP checksumming parameters for the
   7178  *	specified packet.
   7179  */
   7180 static int
   7181 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7182     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7183 {
   7184 	struct mbuf *m0 = txs->txs_mbuf;
   7185 	struct livengood_tcpip_ctxdesc *t;
   7186 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7187 	uint32_t ipcse;
   7188 	struct ether_header *eh;
   7189 	int offset, iphl;
   7190 	uint8_t fields;
   7191 
   7192 	/*
   7193 	 * XXX It would be nice if the mbuf pkthdr had offset
   7194 	 * fields for the protocol headers.
   7195 	 */
   7196 
   7197 	eh = mtod(m0, struct ether_header *);
   7198 	switch (htons(eh->ether_type)) {
   7199 	case ETHERTYPE_IP:
   7200 	case ETHERTYPE_IPV6:
   7201 		offset = ETHER_HDR_LEN;
   7202 		break;
   7203 
   7204 	case ETHERTYPE_VLAN:
   7205 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7206 		break;
   7207 
   7208 	default:
   7209 		/*
   7210 		 * Don't support this protocol or encapsulation.
   7211 		 */
   7212 		*fieldsp = 0;
   7213 		*cmdp = 0;
   7214 		return 0;
   7215 	}
   7216 
   7217 	if ((m0->m_pkthdr.csum_flags &
   7218 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7219 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7220 	} else
   7221 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7222 
   7223 	ipcse = offset + iphl - 1;
   7224 
   7225 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7226 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7227 	seg = 0;
   7228 	fields = 0;
   7229 
   7230 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7231 		int hlen = offset + iphl;
   7232 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7233 
   7234 		if (__predict_false(m0->m_len <
   7235 				    (hlen + sizeof(struct tcphdr)))) {
   7236 			/*
   7237 			 * TCP/IP headers are not in the first mbuf; we need
   7238 			 * to do this the slow and painful way. Let's just
   7239 			 * hope this doesn't happen very often.
   7240 			 */
   7241 			struct tcphdr th;
   7242 
   7243 			WM_Q_EVCNT_INCR(txq, tsopain);
   7244 
   7245 			m_copydata(m0, hlen, sizeof(th), &th);
   7246 			if (v4) {
   7247 				struct ip ip;
   7248 
   7249 				m_copydata(m0, offset, sizeof(ip), &ip);
   7250 				ip.ip_len = 0;
   7251 				m_copyback(m0,
   7252 				    offset + offsetof(struct ip, ip_len),
   7253 				    sizeof(ip.ip_len), &ip.ip_len);
   7254 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7255 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7256 			} else {
   7257 				struct ip6_hdr ip6;
   7258 
   7259 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7260 				ip6.ip6_plen = 0;
   7261 				m_copyback(m0,
   7262 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7263 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7264 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7265 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7266 			}
   7267 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7268 			    sizeof(th.th_sum), &th.th_sum);
   7269 
   7270 			hlen += th.th_off << 2;
   7271 		} else {
   7272 			/*
   7273 			 * TCP/IP headers are in the first mbuf; we can do
   7274 			 * this the easy way.
   7275 			 */
   7276 			struct tcphdr *th;
   7277 
   7278 			if (v4) {
   7279 				struct ip *ip =
   7280 				    (void *)(mtod(m0, char *) + offset);
   7281 				th = (void *)(mtod(m0, char *) + hlen);
   7282 
   7283 				ip->ip_len = 0;
   7284 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7285 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7286 			} else {
   7287 				struct ip6_hdr *ip6 =
   7288 				    (void *)(mtod(m0, char *) + offset);
   7289 				th = (void *)(mtod(m0, char *) + hlen);
   7290 
   7291 				ip6->ip6_plen = 0;
   7292 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7293 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7294 			}
   7295 			hlen += th->th_off << 2;
   7296 		}
   7297 
   7298 		if (v4) {
   7299 			WM_Q_EVCNT_INCR(txq, tso);
   7300 			cmdlen |= WTX_TCPIP_CMD_IP;
   7301 		} else {
   7302 			WM_Q_EVCNT_INCR(txq, tso6);
   7303 			ipcse = 0;
   7304 		}
   7305 		cmd |= WTX_TCPIP_CMD_TSE;
   7306 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7307 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7308 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7309 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7310 	}
   7311 
   7312 	/*
   7313 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7314 	 * offload feature, if we load the context descriptor, we
   7315 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7316 	 */
   7317 
   7318 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7319 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7320 	    WTX_TCPIP_IPCSE(ipcse);
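         	/*
         	 * Example (illustrative): for a plain Ethernet IPv4 packet,
         	 * offset = 14 and iphl = 20, so IPCSS = 14,
         	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 and
         	 * IPCSE = 33, covering exactly the IP header.
         	 */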
   7321 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7322 		WM_Q_EVCNT_INCR(txq, ipsum);
   7323 		fields |= WTX_IXSM;
   7324 	}
   7325 
   7326 	offset += iphl;
   7327 
   7328 	if (m0->m_pkthdr.csum_flags &
   7329 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7330 		WM_Q_EVCNT_INCR(txq, tusum);
   7331 		fields |= WTX_TXSM;
   7332 		tucs = WTX_TCPIP_TUCSS(offset) |
   7333 		    WTX_TCPIP_TUCSO(offset +
   7334 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7335 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7336 	} else if ((m0->m_pkthdr.csum_flags &
   7337 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7338 		WM_Q_EVCNT_INCR(txq, tusum6);
   7339 		fields |= WTX_TXSM;
   7340 		tucs = WTX_TCPIP_TUCSS(offset) |
   7341 		    WTX_TCPIP_TUCSO(offset +
   7342 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7343 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7344 	} else {
   7345 		/* Just initialize it to a valid TCP context. */
   7346 		tucs = WTX_TCPIP_TUCSS(offset) |
   7347 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7348 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7349 	}
   7350 
   7351 	/*
    7352 	 * We don't have to write a context descriptor for every packet,
    7353 	 * except on the 82574: the 82574 requires a context descriptor
    7354 	 * for every packet when two descriptor queues are used.
    7355 	 * Writing a context descriptor for every packet is overhead,
    7356 	 * but it does not cause problems.
   7357 	 */
   7358 	/* Fill in the context descriptor. */
   7359 	t = (struct livengood_tcpip_ctxdesc *)
   7360 	    &txq->txq_descs[txq->txq_next];
   7361 	t->tcpip_ipcs = htole32(ipcs);
   7362 	t->tcpip_tucs = htole32(tucs);
   7363 	t->tcpip_cmdlen = htole32(cmdlen);
   7364 	t->tcpip_seg = htole32(seg);
   7365 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7366 
   7367 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7368 	txs->txs_ndesc++;
   7369 
   7370 	*cmdp = cmd;
   7371 	*fieldsp = fields;
   7372 
   7373 	return 0;
   7374 }
   7375 
   7376 static inline int
   7377 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7378 {
   7379 	struct wm_softc *sc = ifp->if_softc;
   7380 	u_int cpuid = cpu_index(curcpu());
   7381 
   7382 	/*
    7383 	 * Currently, a simple distribution strategy.
    7384 	 * TODO:
    7385 	 * distribute by flowid (RSS hash value).
   7386 	 */
   7387 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7388 }
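         /*
          * Example (illustrative): with ncpu = 8, sc_affinity_offset = 2
          * and sc_nqueues = 4, a packet sent from CPU index 5 selects
          * queue ((5 + 8 - 2) % 8) % 4 = 3 % 4 = 3.
          */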
   7389 
   7390 /*
   7391  * wm_start:		[ifnet interface function]
   7392  *
   7393  *	Start packet transmission on the interface.
   7394  */
   7395 static void
   7396 wm_start(struct ifnet *ifp)
   7397 {
   7398 	struct wm_softc *sc = ifp->if_softc;
   7399 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7400 
   7401 #ifdef WM_MPSAFE
   7402 	KASSERT(if_is_mpsafe(ifp));
   7403 #endif
   7404 	/*
   7405 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7406 	 */
   7407 
   7408 	mutex_enter(txq->txq_lock);
   7409 	if (!txq->txq_stopping)
   7410 		wm_start_locked(ifp);
   7411 	mutex_exit(txq->txq_lock);
   7412 }
   7413 
   7414 static void
   7415 wm_start_locked(struct ifnet *ifp)
   7416 {
   7417 	struct wm_softc *sc = ifp->if_softc;
   7418 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7419 
   7420 	wm_send_common_locked(ifp, txq, false);
   7421 }
   7422 
   7423 static int
   7424 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7425 {
   7426 	int qid;
   7427 	struct wm_softc *sc = ifp->if_softc;
   7428 	struct wm_txqueue *txq;
   7429 
   7430 	qid = wm_select_txqueue(ifp, m);
   7431 	txq = &sc->sc_queue[qid].wmq_txq;
   7432 
   7433 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7434 		m_freem(m);
   7435 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7436 		return ENOBUFS;
   7437 	}
   7438 
   7439 	/*
   7440 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7441 	 */
   7442 	ifp->if_obytes += m->m_pkthdr.len;
   7443 	if (m->m_flags & M_MCAST)
   7444 		ifp->if_omcasts++;
   7445 
   7446 	if (mutex_tryenter(txq->txq_lock)) {
   7447 		if (!txq->txq_stopping)
   7448 			wm_transmit_locked(ifp, txq);
   7449 		mutex_exit(txq->txq_lock);
   7450 	}
   7451 
   7452 	return 0;
   7453 }
   7454 
   7455 static void
   7456 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7457 {
   7458 
   7459 	wm_send_common_locked(ifp, txq, true);
   7460 }
   7461 
   7462 static void
   7463 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7464     bool is_transmit)
   7465 {
   7466 	struct wm_softc *sc = ifp->if_softc;
   7467 	struct mbuf *m0;
   7468 	struct wm_txsoft *txs;
   7469 	bus_dmamap_t dmamap;
   7470 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7471 	bus_addr_t curaddr;
   7472 	bus_size_t seglen, curlen;
   7473 	uint32_t cksumcmd;
   7474 	uint8_t cksumfields;
   7475 	bool remap = true;
   7476 
   7477 	KASSERT(mutex_owned(txq->txq_lock));
   7478 
   7479 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7480 		return;
   7481 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7482 		return;
   7483 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7484 		return;
   7485 
   7486 	/* Remember the previous number of free descriptors. */
   7487 	ofree = txq->txq_free;
   7488 
   7489 	/*
   7490 	 * Loop through the send queue, setting up transmit descriptors
   7491 	 * until we drain the queue, or use up all available transmit
   7492 	 * descriptors.
   7493 	 */
   7494 	for (;;) {
   7495 		m0 = NULL;
   7496 
   7497 		/* Get a work queue entry. */
   7498 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7499 			wm_txeof(txq, UINT_MAX);
   7500 			if (txq->txq_sfree == 0) {
   7501 				DPRINTF(WM_DEBUG_TX,
   7502 				    ("%s: TX: no free job descriptors\n",
   7503 					device_xname(sc->sc_dev)));
   7504 				WM_Q_EVCNT_INCR(txq, txsstall);
   7505 				break;
   7506 			}
   7507 		}
   7508 
   7509 		/* Grab a packet off the queue. */
   7510 		if (is_transmit)
   7511 			m0 = pcq_get(txq->txq_interq);
   7512 		else
   7513 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7514 		if (m0 == NULL)
   7515 			break;
   7516 
   7517 		DPRINTF(WM_DEBUG_TX,
   7518 		    ("%s: TX: have packet to transmit: %p\n",
   7519 			device_xname(sc->sc_dev), m0));
   7520 
   7521 		txs = &txq->txq_soft[txq->txq_snext];
   7522 		dmamap = txs->txs_dmamap;
   7523 
   7524 		use_tso = (m0->m_pkthdr.csum_flags &
   7525 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7526 
   7527 		/*
   7528 		 * So says the Linux driver:
   7529 		 * The controller does a simple calculation to make sure
   7530 		 * there is enough room in the FIFO before initiating the
   7531 		 * DMA for each buffer. The calc is:
   7532 		 *	4 = ceil(buffer len / MSS)
   7533 		 * To make sure we don't overrun the FIFO, adjust the max
   7534 		 * buffer len if the MSS drops.
   7535 		 */
   7536 		dmamap->dm_maxsegsz =
   7537 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7538 		    ? m0->m_pkthdr.segsz << 2
   7539 		    : WTX_MAX_LEN;
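         		/*
         		 * Illustrative: with a typical MSS of 1460, the cap
         		 * above is 1460 << 2 = 5840 bytes per DMA segment, so
         		 * each segment spans at most ceil(5840 / 1460) = 4
         		 * MSS-sized chunks, matching the controller's check.
         		 */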
   7540 
   7541 		/*
   7542 		 * Load the DMA map.  If this fails, the packet either
   7543 		 * didn't fit in the allotted number of segments, or we
   7544 		 * were short on resources.  For the too-many-segments
   7545 		 * case, we simply report an error and drop the packet,
   7546 		 * since we can't sanely copy a jumbo packet to a single
   7547 		 * buffer.
   7548 		 */
   7549 retry:
   7550 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7551 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7552 		if (__predict_false(error)) {
   7553 			if (error == EFBIG) {
   7554 				if (remap == true) {
   7555 					struct mbuf *m;
   7556 
   7557 					remap = false;
   7558 					m = m_defrag(m0, M_NOWAIT);
   7559 					if (m != NULL) {
   7560 						WM_Q_EVCNT_INCR(txq, defrag);
   7561 						m0 = m;
   7562 						goto retry;
   7563 					}
   7564 				}
   7565 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7566 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7567 				    "DMA segments, dropping...\n",
   7568 				    device_xname(sc->sc_dev));
   7569 				wm_dump_mbuf_chain(sc, m0);
   7570 				m_freem(m0);
   7571 				continue;
   7572 			}
    7573 			/* Short on resources, just stop for now. */
   7574 			DPRINTF(WM_DEBUG_TX,
   7575 			    ("%s: TX: dmamap load failed: %d\n",
   7576 				device_xname(sc->sc_dev), error));
   7577 			break;
   7578 		}
   7579 
   7580 		segs_needed = dmamap->dm_nsegs;
   7581 		if (use_tso) {
   7582 			/* For sentinel descriptor; see below. */
   7583 			segs_needed++;
   7584 		}
   7585 
   7586 		/*
   7587 		 * Ensure we have enough descriptors free to describe
   7588 		 * the packet. Note, we always reserve one descriptor
   7589 		 * at the end of the ring due to the semantics of the
   7590 		 * TDT register, plus one more in the event we need
   7591 		 * to load offload context.
   7592 		 */
   7593 		if (segs_needed > txq->txq_free - 2) {
   7594 			/*
   7595 			 * Not enough free descriptors to transmit this
   7596 			 * packet.  We haven't committed anything yet,
   7597 			 * so just unload the DMA map, put the packet
    7598 			 * back on the queue, and punt. Notify the upper
   7599 			 * layer that there are no more slots left.
   7600 			 */
   7601 			DPRINTF(WM_DEBUG_TX,
   7602 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7603 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7604 				segs_needed, txq->txq_free - 1));
   7605 			if (!is_transmit)
   7606 				ifp->if_flags |= IFF_OACTIVE;
   7607 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7608 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7609 			WM_Q_EVCNT_INCR(txq, txdstall);
   7610 			break;
   7611 		}
   7612 
   7613 		/*
   7614 		 * Check for 82547 Tx FIFO bug. We need to do this
   7615 		 * once we know we can transmit the packet, since we
   7616 		 * do some internal FIFO space accounting here.
   7617 		 */
   7618 		if (sc->sc_type == WM_T_82547 &&
   7619 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7620 			DPRINTF(WM_DEBUG_TX,
   7621 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7622 				device_xname(sc->sc_dev)));
   7623 			if (!is_transmit)
   7624 				ifp->if_flags |= IFF_OACTIVE;
   7625 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7626 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7627 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7628 			break;
   7629 		}
   7630 
   7631 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7632 
   7633 		DPRINTF(WM_DEBUG_TX,
   7634 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7635 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7636 
   7637 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7638 
   7639 		/*
   7640 		 * Store a pointer to the packet so that we can free it
   7641 		 * later.
   7642 		 *
    7643 		 * Initially, we consider the number of descriptors the
    7644 		 * packet uses to be the number of DMA segments.  This may be
   7645 		 * incremented by 1 if we do checksum offload (a descriptor
   7646 		 * is used to set the checksum context).
   7647 		 */
   7648 		txs->txs_mbuf = m0;
   7649 		txs->txs_firstdesc = txq->txq_next;
   7650 		txs->txs_ndesc = segs_needed;
   7651 
   7652 		/* Set up offload parameters for this packet. */
   7653 		if (m0->m_pkthdr.csum_flags &
   7654 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7655 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7656 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7657 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7658 					  &cksumfields) != 0) {
   7659 				/* Error message already displayed. */
   7660 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7661 				continue;
   7662 			}
   7663 		} else {
   7664 			cksumcmd = 0;
   7665 			cksumfields = 0;
   7666 		}
   7667 
   7668 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7669 
   7670 		/* Sync the DMA map. */
   7671 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7672 		    BUS_DMASYNC_PREWRITE);
   7673 
   7674 		/* Initialize the transmit descriptor. */
   7675 		for (nexttx = txq->txq_next, seg = 0;
   7676 		     seg < dmamap->dm_nsegs; seg++) {
   7677 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7678 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7679 			     seglen != 0;
   7680 			     curaddr += curlen, seglen -= curlen,
   7681 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7682 				curlen = seglen;
   7683 
   7684 				/*
   7685 				 * So says the Linux driver:
    7686 				 * Workaround for premature descriptor
   7687 				 * write-backs in TSO mode.  Append a
   7688 				 * 4-byte sentinel descriptor.
   7689 				 */
   7690 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7691 				    curlen > 8)
   7692 					curlen -= 4;
   7693 
   7694 				wm_set_dma_addr(
   7695 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7696 				txq->txq_descs[nexttx].wtx_cmdlen
   7697 				    = htole32(cksumcmd | curlen);
   7698 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7699 				    = 0;
   7700 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7701 				    = cksumfields;
    7702 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7703 				lasttx = nexttx;
   7704 
   7705 				DPRINTF(WM_DEBUG_TX,
   7706 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7707 					"len %#04zx\n",
   7708 					device_xname(sc->sc_dev), nexttx,
   7709 					(uint64_t)curaddr, curlen));
   7710 			}
   7711 		}
   7712 
   7713 		KASSERT(lasttx != -1);
   7714 
   7715 		/*
   7716 		 * Set up the command byte on the last descriptor of
   7717 		 * the packet. If we're in the interrupt delay window,
   7718 		 * delay the interrupt.
   7719 		 */
   7720 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7721 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7722 
   7723 		/*
   7724 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7725 		 * up the descriptor to encapsulate the packet for us.
   7726 		 *
   7727 		 * This is only valid on the last descriptor of the packet.
   7728 		 */
   7729 		if (vlan_has_tag(m0)) {
   7730 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7731 			    htole32(WTX_CMD_VLE);
   7732 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7733 			    = htole16(vlan_get_tag(m0));
   7734 		}
   7735 
   7736 		txs->txs_lastdesc = lasttx;
   7737 
   7738 		DPRINTF(WM_DEBUG_TX,
   7739 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7740 			device_xname(sc->sc_dev),
   7741 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7742 
   7743 		/* Sync the descriptors we're using. */
   7744 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7745 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7746 
   7747 		/* Give the packet to the chip. */
   7748 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7749 
   7750 		DPRINTF(WM_DEBUG_TX,
   7751 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7752 
   7753 		DPRINTF(WM_DEBUG_TX,
   7754 		    ("%s: TX: finished transmitting packet, job %d\n",
   7755 			device_xname(sc->sc_dev), txq->txq_snext));
   7756 
   7757 		/* Advance the tx pointer. */
   7758 		txq->txq_free -= txs->txs_ndesc;
   7759 		txq->txq_next = nexttx;
   7760 
   7761 		txq->txq_sfree--;
   7762 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7763 
   7764 		/* Pass the packet to any BPF listeners. */
   7765 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7766 	}
   7767 
   7768 	if (m0 != NULL) {
   7769 		if (!is_transmit)
   7770 			ifp->if_flags |= IFF_OACTIVE;
   7771 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7772 		WM_Q_EVCNT_INCR(txq, descdrop);
   7773 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7774 			__func__));
   7775 		m_freem(m0);
   7776 	}
   7777 
   7778 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7779 		/* No more slots; notify upper layer. */
   7780 		if (!is_transmit)
   7781 			ifp->if_flags |= IFF_OACTIVE;
   7782 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7783 	}
   7784 
   7785 	if (txq->txq_free != ofree) {
   7786 		/* Set a watchdog timer in case the chip flakes out. */
   7787 		txq->txq_lastsent = time_uptime;
   7788 		txq->txq_sending = true;
   7789 	}
   7790 }
   7791 
   7792 /*
   7793  * wm_nq_tx_offload:
   7794  *
   7795  *	Set up TCP/IP checksumming parameters for the
    7796  *	specified packet, for NEWQUEUE devices.
   7797  */
   7798 static int
   7799 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7800     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7801 {
   7802 	struct mbuf *m0 = txs->txs_mbuf;
   7803 	uint32_t vl_len, mssidx, cmdc;
   7804 	struct ether_header *eh;
   7805 	int offset, iphl;
   7806 
   7807 	/*
   7808 	 * XXX It would be nice if the mbuf pkthdr had offset
   7809 	 * fields for the protocol headers.
   7810 	 */
   7811 	*cmdlenp = 0;
   7812 	*fieldsp = 0;
   7813 
   7814 	eh = mtod(m0, struct ether_header *);
   7815 	switch (htons(eh->ether_type)) {
   7816 	case ETHERTYPE_IP:
   7817 	case ETHERTYPE_IPV6:
   7818 		offset = ETHER_HDR_LEN;
   7819 		break;
   7820 
   7821 	case ETHERTYPE_VLAN:
   7822 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7823 		break;
   7824 
   7825 	default:
   7826 		/* Don't support this protocol or encapsulation. */
   7827 		*do_csum = false;
   7828 		return 0;
   7829 	}
   7830 	*do_csum = true;
   7831 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7832 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7833 
   7834 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7835 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7836 
   7837 	if ((m0->m_pkthdr.csum_flags &
   7838 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7839 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7840 	} else {
   7841 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7842 	}
   7843 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7844 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
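         	/*
         	 * Example (illustrative): for an untagged IPv4 TCP packet,
         	 * vl_len now packs MACLEN = 14 and IPLEN = 20; the VLAN
         	 * field is added below only when the mbuf carries a tag.
         	 */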
   7845 
   7846 	if (vlan_has_tag(m0)) {
   7847 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7848 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7849 		*cmdlenp |= NQTX_CMD_VLE;
   7850 	}
   7851 
   7852 	mssidx = 0;
   7853 
   7854 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7855 		int hlen = offset + iphl;
   7856 		int tcp_hlen;
   7857 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7858 
   7859 		if (__predict_false(m0->m_len <
   7860 				    (hlen + sizeof(struct tcphdr)))) {
   7861 			/*
   7862 			 * TCP/IP headers are not in the first mbuf; we need
   7863 			 * to do this the slow and painful way. Let's just
   7864 			 * hope this doesn't happen very often.
   7865 			 */
   7866 			struct tcphdr th;
   7867 
   7868 			WM_Q_EVCNT_INCR(txq, tsopain);
   7869 
   7870 			m_copydata(m0, hlen, sizeof(th), &th);
   7871 			if (v4) {
   7872 				struct ip ip;
   7873 
   7874 				m_copydata(m0, offset, sizeof(ip), &ip);
   7875 				ip.ip_len = 0;
   7876 				m_copyback(m0,
   7877 				    offset + offsetof(struct ip, ip_len),
   7878 				    sizeof(ip.ip_len), &ip.ip_len);
   7879 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7880 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7881 			} else {
   7882 				struct ip6_hdr ip6;
   7883 
   7884 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7885 				ip6.ip6_plen = 0;
   7886 				m_copyback(m0,
   7887 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7888 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7889 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7890 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7891 			}
   7892 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7893 			    sizeof(th.th_sum), &th.th_sum);
   7894 
   7895 			tcp_hlen = th.th_off << 2;
   7896 		} else {
   7897 			/*
   7898 			 * TCP/IP headers are in the first mbuf; we can do
   7899 			 * this the easy way.
   7900 			 */
   7901 			struct tcphdr *th;
   7902 
   7903 			if (v4) {
   7904 				struct ip *ip =
   7905 				    (void *)(mtod(m0, char *) + offset);
   7906 				th = (void *)(mtod(m0, char *) + hlen);
   7907 
   7908 				ip->ip_len = 0;
   7909 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7910 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7911 			} else {
   7912 				struct ip6_hdr *ip6 =
   7913 				    (void *)(mtod(m0, char *) + offset);
   7914 				th = (void *)(mtod(m0, char *) + hlen);
   7915 
   7916 				ip6->ip6_plen = 0;
   7917 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7918 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7919 			}
   7920 			tcp_hlen = th->th_off << 2;
   7921 		}
   7922 		hlen += tcp_hlen;
   7923 		*cmdlenp |= NQTX_CMD_TSE;
   7924 
   7925 		if (v4) {
   7926 			WM_Q_EVCNT_INCR(txq, tso);
   7927 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7928 		} else {
   7929 			WM_Q_EVCNT_INCR(txq, tso6);
   7930 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7931 		}
   7932 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7933 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7934 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7935 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7936 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7937 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7938 	} else {
   7939 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7940 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7941 	}
   7942 
   7943 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7944 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7945 		cmdc |= NQTXC_CMD_IP4;
   7946 	}
   7947 
   7948 	if (m0->m_pkthdr.csum_flags &
   7949 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7950 		WM_Q_EVCNT_INCR(txq, tusum);
   7951 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7952 			cmdc |= NQTXC_CMD_TCP;
   7953 		else
   7954 			cmdc |= NQTXC_CMD_UDP;
   7955 
   7956 		cmdc |= NQTXC_CMD_IP4;
   7957 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7958 	}
   7959 	if (m0->m_pkthdr.csum_flags &
   7960 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7961 		WM_Q_EVCNT_INCR(txq, tusum6);
   7962 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7963 			cmdc |= NQTXC_CMD_TCP;
   7964 		else
   7965 			cmdc |= NQTXC_CMD_UDP;
   7966 
   7967 		cmdc |= NQTXC_CMD_IP6;
   7968 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7969 	}
   7970 
   7971 	/*
    7972 	 * We don't have to write a context descriptor for every packet on
    7973 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    7974 	 * I210 and I211. It is enough to write one per Tx queue for these
    7975 	 * controllers.
    7976 	 * Writing a context descriptor for every packet adds overhead,
    7977 	 * but it does not cause problems.
   7978 	 */
   7979 	/* Fill in the context descriptor. */
   7980 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7981 	    htole32(vl_len);
   7982 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7983 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7984 	    htole32(cmdc);
   7985 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7986 	    htole32(mssidx);
   7987 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7988 	DPRINTF(WM_DEBUG_TX,
   7989 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7990 		txq->txq_next, 0, vl_len));
   7991 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7992 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7993 	txs->txs_ndesc++;
   7994 	return 0;
   7995 }
   7996 
   7997 /*
   7998  * wm_nq_start:		[ifnet interface function]
   7999  *
   8000  *	Start packet transmission on the interface for NEWQUEUE devices
   8001  */
   8002 static void
   8003 wm_nq_start(struct ifnet *ifp)
   8004 {
   8005 	struct wm_softc *sc = ifp->if_softc;
   8006 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8007 
   8008 #ifdef WM_MPSAFE
   8009 	KASSERT(if_is_mpsafe(ifp));
   8010 #endif
   8011 	/*
   8012 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8013 	 */
   8014 
   8015 	mutex_enter(txq->txq_lock);
   8016 	if (!txq->txq_stopping)
   8017 		wm_nq_start_locked(ifp);
   8018 	mutex_exit(txq->txq_lock);
   8019 }
   8020 
   8021 static void
   8022 wm_nq_start_locked(struct ifnet *ifp)
   8023 {
   8024 	struct wm_softc *sc = ifp->if_softc;
   8025 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8026 
   8027 	wm_nq_send_common_locked(ifp, txq, false);
   8028 }
   8029 
   8030 static int
   8031 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8032 {
   8033 	int qid;
   8034 	struct wm_softc *sc = ifp->if_softc;
   8035 	struct wm_txqueue *txq;
   8036 
   8037 	qid = wm_select_txqueue(ifp, m);
   8038 	txq = &sc->sc_queue[qid].wmq_txq;
   8039 
   8040 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8041 		m_freem(m);
   8042 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8043 		return ENOBUFS;
   8044 	}
   8045 
   8046 	/*
   8047 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8048 	 */
   8049 	ifp->if_obytes += m->m_pkthdr.len;
   8050 	if (m->m_flags & M_MCAST)
   8051 		ifp->if_omcasts++;
   8052 
   8053 	/*
    8054 	 * There are two situations in which this mutex_tryenter() can fail
    8055 	 * at run time:
    8056 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8057 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8058 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8059 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8060 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    8061 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
    8062 	 * either.
   8063 	 */
   8064 	if (mutex_tryenter(txq->txq_lock)) {
   8065 		if (!txq->txq_stopping)
   8066 			wm_nq_transmit_locked(ifp, txq);
   8067 		mutex_exit(txq->txq_lock);
   8068 	}
   8069 
   8070 	return 0;
   8071 }
   8072 
   8073 static void
   8074 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8075 {
   8076 
   8077 	wm_nq_send_common_locked(ifp, txq, true);
   8078 }
   8079 
   8080 static void
   8081 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8082     bool is_transmit)
   8083 {
   8084 	struct wm_softc *sc = ifp->if_softc;
   8085 	struct mbuf *m0;
   8086 	struct wm_txsoft *txs;
   8087 	bus_dmamap_t dmamap;
   8088 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8089 	bool do_csum, sent;
   8090 	bool remap = true;
   8091 
   8092 	KASSERT(mutex_owned(txq->txq_lock));
   8093 
   8094 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8095 		return;
   8096 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8097 		return;
   8098 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8099 		return;
   8100 
   8101 	sent = false;
   8102 
   8103 	/*
   8104 	 * Loop through the send queue, setting up transmit descriptors
   8105 	 * until we drain the queue, or use up all available transmit
   8106 	 * descriptors.
   8107 	 */
   8108 	for (;;) {
   8109 		m0 = NULL;
   8110 
   8111 		/* Get a work queue entry. */
   8112 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8113 			wm_txeof(txq, UINT_MAX);
   8114 			if (txq->txq_sfree == 0) {
   8115 				DPRINTF(WM_DEBUG_TX,
   8116 				    ("%s: TX: no free job descriptors\n",
   8117 					device_xname(sc->sc_dev)));
   8118 				WM_Q_EVCNT_INCR(txq, txsstall);
   8119 				break;
   8120 			}
   8121 		}
   8122 
   8123 		/* Grab a packet off the queue. */
   8124 		if (is_transmit)
   8125 			m0 = pcq_get(txq->txq_interq);
   8126 		else
   8127 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8128 		if (m0 == NULL)
   8129 			break;
   8130 
   8131 		DPRINTF(WM_DEBUG_TX,
   8132 		    ("%s: TX: have packet to transmit: %p\n",
   8133 		    device_xname(sc->sc_dev), m0));
   8134 
   8135 		txs = &txq->txq_soft[txq->txq_snext];
   8136 		dmamap = txs->txs_dmamap;
   8137 
   8138 		/*
   8139 		 * Load the DMA map.  If this fails, the packet either
   8140 		 * didn't fit in the allotted number of segments, or we
   8141 		 * were short on resources.  For the too-many-segments
   8142 		 * case, we simply report an error and drop the packet,
   8143 		 * since we can't sanely copy a jumbo packet to a single
   8144 		 * buffer.
   8145 		 */
   8146 retry:
   8147 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8148 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8149 		if (__predict_false(error)) {
   8150 			if (error == EFBIG) {
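         				/*
         				 * Too many DMA segments: try once to
         				 * defragment the mbuf chain before
         				 * dropping the packet.
         				 */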
   8151 				if (remap == true) {
   8152 					struct mbuf *m;
   8153 
   8154 					remap = false;
   8155 					m = m_defrag(m0, M_NOWAIT);
   8156 					if (m != NULL) {
   8157 						WM_Q_EVCNT_INCR(txq, defrag);
   8158 						m0 = m;
   8159 						goto retry;
   8160 					}
   8161 				}
   8162 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8163 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8164 				    "DMA segments, dropping...\n",
   8165 				    device_xname(sc->sc_dev));
   8166 				wm_dump_mbuf_chain(sc, m0);
   8167 				m_freem(m0);
   8168 				continue;
   8169 			}
   8170 			/* Short on resources, just stop for now. */
   8171 			DPRINTF(WM_DEBUG_TX,
   8172 			    ("%s: TX: dmamap load failed: %d\n",
   8173 				device_xname(sc->sc_dev), error));
   8174 			break;
   8175 		}
   8176 
   8177 		segs_needed = dmamap->dm_nsegs;
   8178 
   8179 		/*
   8180 		 * Ensure we have enough descriptors free to describe
   8181 		 * the packet. Note, we always reserve one descriptor
   8182 		 * at the end of the ring due to the semantics of the
   8183 		 * TDT register, plus one more in the event we need
   8184 		 * to load offload context.
   8185 		 */
   8186 		if (segs_needed > txq->txq_free - 2) {
   8187 			/*
   8188 			 * Not enough free descriptors to transmit this
   8189 			 * packet.  We haven't committed anything yet,
   8190 			 * so just unload the DMA map, put the packet
    8191 			 * back on the queue, and punt. Notify the upper
   8192 			 * layer that there are no more slots left.
   8193 			 */
   8194 			DPRINTF(WM_DEBUG_TX,
   8195 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8196 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8197 				segs_needed, txq->txq_free - 1));
   8198 			if (!is_transmit)
   8199 				ifp->if_flags |= IFF_OACTIVE;
   8200 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8201 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8202 			WM_Q_EVCNT_INCR(txq, txdstall);
   8203 			break;
   8204 		}
   8205 
   8206 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8207 
   8208 		DPRINTF(WM_DEBUG_TX,
   8209 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8210 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8211 
   8212 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8213 
   8214 		/*
   8215 		 * Store a pointer to the packet so that we can free it
   8216 		 * later.
   8217 		 *
    8218 		 * Initially, we consider the number of descriptors the
    8219 		 * packet uses to be the number of DMA segments.  This may
    8220 		 * be incremented by 1 if we do checksum offload (a descriptor
   8221 		 * is used to set the checksum context).
   8222 		 */
   8223 		txs->txs_mbuf = m0;
   8224 		txs->txs_firstdesc = txq->txq_next;
   8225 		txs->txs_ndesc = segs_needed;
   8226 
   8227 		/* Set up offload parameters for this packet. */
   8228 		uint32_t cmdlen, fields, dcmdlen;
   8229 		if (m0->m_pkthdr.csum_flags &
   8230 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8231 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8232 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8233 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8234 			    &do_csum) != 0) {
   8235 				/* Error message already displayed. */
   8236 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8237 				continue;
   8238 			}
   8239 		} else {
   8240 			do_csum = false;
   8241 			cmdlen = 0;
   8242 			fields = 0;
   8243 		}
   8244 
   8245 		/* Sync the DMA map. */
   8246 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8247 		    BUS_DMASYNC_PREWRITE);
   8248 
   8249 		/* Initialize the first transmit descriptor. */
   8250 		nexttx = txq->txq_next;
   8251 		if (!do_csum) {
   8252 			/* setup a legacy descriptor */
   8253 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8254 			    dmamap->dm_segs[0].ds_addr);
   8255 			txq->txq_descs[nexttx].wtx_cmdlen =
   8256 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8257 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8258 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8259 			if (vlan_has_tag(m0)) {
   8260 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8261 				    htole32(WTX_CMD_VLE);
   8262 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8263 				    htole16(vlan_get_tag(m0));
   8264 			} else
    8265 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8266 
   8267 			dcmdlen = 0;
   8268 		} else {
   8269 			/* setup an advanced data descriptor */
   8270 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8271 			    htole64(dmamap->dm_segs[0].ds_addr);
   8272 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8273 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8274 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8275 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8276 			    htole32(fields);
   8277 			DPRINTF(WM_DEBUG_TX,
   8278 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8279 				device_xname(sc->sc_dev), nexttx,
   8280 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8281 			DPRINTF(WM_DEBUG_TX,
   8282 			    ("\t 0x%08x%08x\n", fields,
   8283 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8284 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8285 		}
   8286 
   8287 		lasttx = nexttx;
   8288 		nexttx = WM_NEXTTX(txq, nexttx);
   8289 		/*
    8290 		 * Fill in the next descriptors. The legacy and advanced
    8291 		 * formats are identical here.
   8292 		 */
   8293 		for (seg = 1; seg < dmamap->dm_nsegs;
   8294 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8295 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8296 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8297 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8298 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8299 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8300 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8301 			lasttx = nexttx;
   8302 
   8303 			DPRINTF(WM_DEBUG_TX,
   8304 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8305 				device_xname(sc->sc_dev), nexttx,
   8306 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8307 				dmamap->dm_segs[seg].ds_len));
   8308 		}
   8309 
   8310 		KASSERT(lasttx != -1);
   8311 
   8312 		/*
   8313 		 * Set up the command byte on the last descriptor of
   8314 		 * the packet. If we're in the interrupt delay window,
   8315 		 * delay the interrupt.
   8316 		 */
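         		/*
         		 * The EOP and RS bits are at the same positions in the
         		 * legacy and advanced descriptor formats (asserted just
         		 * below), so setting them through the legacy view of the
         		 * descriptor union is safe here.
         		 */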
   8317 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8318 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8319 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8320 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8321 
   8322 		txs->txs_lastdesc = lasttx;
   8323 
   8324 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8325 		    device_xname(sc->sc_dev),
   8326 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8327 
   8328 		/* Sync the descriptors we're using. */
   8329 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8330 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8331 
   8332 		/* Give the packet to the chip. */
   8333 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8334 		sent = true;
   8335 
   8336 		DPRINTF(WM_DEBUG_TX,
   8337 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8338 
   8339 		DPRINTF(WM_DEBUG_TX,
   8340 		    ("%s: TX: finished transmitting packet, job %d\n",
   8341 			device_xname(sc->sc_dev), txq->txq_snext));
   8342 
   8343 		/* Advance the tx pointer. */
   8344 		txq->txq_free -= txs->txs_ndesc;
   8345 		txq->txq_next = nexttx;
   8346 
   8347 		txq->txq_sfree--;
   8348 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8349 
   8350 		/* Pass the packet to any BPF listeners. */
   8351 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8352 	}
   8353 
   8354 	if (m0 != NULL) {
   8355 		if (!is_transmit)
   8356 			ifp->if_flags |= IFF_OACTIVE;
   8357 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8358 		WM_Q_EVCNT_INCR(txq, descdrop);
   8359 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8360 			__func__));
   8361 		m_freem(m0);
   8362 	}
   8363 
   8364 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8365 		/* No more slots; notify upper layer. */
   8366 		if (!is_transmit)
   8367 			ifp->if_flags |= IFF_OACTIVE;
   8368 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8369 	}
   8370 
   8371 	if (sent) {
   8372 		/* Set a watchdog timer in case the chip flakes out. */
   8373 		txq->txq_lastsent = time_uptime;
   8374 		txq->txq_sending = true;
   8375 	}
   8376 }
   8377 
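         /*
          * wm_deferred_start_locked:
          *
          *	Restart packet transmission deferred from the interrupt
          *	handlers. Called with the Tx queue lock held.
          */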
   8378 static void
   8379 wm_deferred_start_locked(struct wm_txqueue *txq)
   8380 {
   8381 	struct wm_softc *sc = txq->txq_sc;
   8382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8383 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8384 	int qid = wmq->wmq_id;
   8385 
   8386 	KASSERT(mutex_owned(txq->txq_lock));
   8387 
    8388 	/* The caller holds txq_lock and will release it. */
    8389 	if (txq->txq_stopping)
    8390 		return;
   8392 
   8393 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8394 		/* XXX needed for ALTQ or single-CPU systems */
   8395 		if (qid == 0)
   8396 			wm_nq_start_locked(ifp);
   8397 		wm_nq_transmit_locked(ifp, txq);
   8398 	} else {
    8399 		/* XXX needed for ALTQ or single-CPU systems */
   8400 		if (qid == 0)
   8401 			wm_start_locked(ifp);
   8402 		wm_transmit_locked(ifp, txq);
   8403 	}
   8404 }
   8405 
   8406 /* Interrupt */
   8407 
   8408 /*
   8409  * wm_txeof:
   8410  *
   8411  *	Helper; handle transmit interrupts.
   8412  */
   8413 static bool
   8414 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8415 {
   8416 	struct wm_softc *sc = txq->txq_sc;
   8417 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8418 	struct wm_txsoft *txs;
   8419 	int count = 0;
   8420 	int i;
   8421 	uint8_t status;
   8422 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8423 	bool more = false;
   8424 
   8425 	KASSERT(mutex_owned(txq->txq_lock));
   8426 
   8427 	if (txq->txq_stopping)
   8428 		return false;
   8429 
   8430 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8431 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8432 	if (wmq->wmq_id == 0)
   8433 		ifp->if_flags &= ~IFF_OACTIVE;
   8434 
   8435 	/*
   8436 	 * Go through the Tx list and free mbufs for those
   8437 	 * frames which have been transmitted.
   8438 	 */
   8439 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8440 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8441 		if (limit-- == 0) {
   8442 			more = true;
   8443 			DPRINTF(WM_DEBUG_TX,
   8444 			    ("%s: TX: loop limited, job %d is not processed\n",
   8445 				device_xname(sc->sc_dev), i));
   8446 			break;
   8447 		}
   8448 
   8449 		txs = &txq->txq_soft[i];
   8450 
   8451 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8452 			device_xname(sc->sc_dev), i));
   8453 
   8454 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8455 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8456 
   8457 		status =
   8458 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8459 		if ((status & WTX_ST_DD) == 0) {
   8460 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8461 			    BUS_DMASYNC_PREREAD);
   8462 			break;
   8463 		}
   8464 
   8465 		count++;
   8466 		DPRINTF(WM_DEBUG_TX,
   8467 		    ("%s: TX: job %d done: descs %d..%d\n",
   8468 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8469 		    txs->txs_lastdesc));
   8470 
   8471 		/*
   8472 		 * XXX We should probably be using the statistics
   8473 		 * XXX registers, but I don't know if they exist
   8474 		 * XXX on chips before the i82544.
   8475 		 */
   8476 
   8477 #ifdef WM_EVENT_COUNTERS
   8478 		if (status & WTX_ST_TU)
   8479 			WM_Q_EVCNT_INCR(txq, underrun);
   8480 #endif /* WM_EVENT_COUNTERS */
   8481 
   8482 		/*
    8483 		 * The documentation for the 82574 and newer says the status
    8484 		 * field has neither an EC (Excessive Collision) bit nor an LC
    8485 		 * (Late Collision) bit (both are reserved). Refer to the "PCIe
    8486 		 * GbE Controller Open Source Software Developer's Manual" and
    8487 		 * the 82574 and newer datasheets.
    8488 		 *
    8489 		 * XXX I saw the LC bit set on an I218 even though the media was
    8490 		 * full duplex, so the bit might have another meaning (no document).
   8491 		 */
   8492 
   8493 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8494 		    && ((sc->sc_type < WM_T_82574)
   8495 			|| (sc->sc_type == WM_T_80003))) {
   8496 			ifp->if_oerrors++;
   8497 			if (status & WTX_ST_LC)
   8498 				log(LOG_WARNING, "%s: late collision\n",
   8499 				    device_xname(sc->sc_dev));
   8500 			else if (status & WTX_ST_EC) {
   8501 				ifp->if_collisions +=
   8502 				    TX_COLLISION_THRESHOLD + 1;
   8503 				log(LOG_WARNING, "%s: excessive collisions\n",
   8504 				    device_xname(sc->sc_dev));
   8505 			}
   8506 		} else
   8507 			ifp->if_opackets++;
   8508 
   8509 		txq->txq_packets++;
   8510 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8511 
   8512 		txq->txq_free += txs->txs_ndesc;
   8513 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8514 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8515 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8516 		m_freem(txs->txs_mbuf);
   8517 		txs->txs_mbuf = NULL;
   8518 	}
   8519 
   8520 	/* Update the dirty transmit buffer pointer. */
   8521 	txq->txq_sdirty = i;
   8522 	DPRINTF(WM_DEBUG_TX,
   8523 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8524 
   8525 	if (count != 0)
   8526 		rnd_add_uint32(&sc->rnd_source, count);
   8527 
   8528 	/*
   8529 	 * If there are no more pending transmissions, cancel the watchdog
   8530 	 * timer.
   8531 	 */
   8532 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8533 		txq->txq_sending = false;
   8534 
   8535 	return more;
   8536 }
   8537 
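         /*
          * Receive descriptor accessors.
          *
          *	Three Rx descriptor formats are in use: the 82574 uses
          *	extended descriptors, NEWQUEUE (82575 and newer) devices use
          *	advanced descriptors, and all other devices use legacy
          *	descriptors. These helpers hide the format differences.
          */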
   8538 static inline uint32_t
   8539 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8540 {
   8541 	struct wm_softc *sc = rxq->rxq_sc;
   8542 
   8543 	if (sc->sc_type == WM_T_82574)
   8544 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8545 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8546 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8547 	else
   8548 		return rxq->rxq_descs[idx].wrx_status;
   8549 }
   8550 
   8551 static inline uint32_t
   8552 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8553 {
   8554 	struct wm_softc *sc = rxq->rxq_sc;
   8555 
   8556 	if (sc->sc_type == WM_T_82574)
   8557 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8558 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8559 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8560 	else
   8561 		return rxq->rxq_descs[idx].wrx_errors;
   8562 }
   8563 
   8564 static inline uint16_t
   8565 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8566 {
   8567 	struct wm_softc *sc = rxq->rxq_sc;
   8568 
   8569 	if (sc->sc_type == WM_T_82574)
   8570 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8571 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8572 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8573 	else
   8574 		return rxq->rxq_descs[idx].wrx_special;
   8575 }
   8576 
   8577 static inline int
   8578 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8579 {
   8580 	struct wm_softc *sc = rxq->rxq_sc;
   8581 
   8582 	if (sc->sc_type == WM_T_82574)
   8583 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8584 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8585 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8586 	else
   8587 		return rxq->rxq_descs[idx].wrx_len;
   8588 }
   8589 
   8590 #ifdef WM_DEBUG
   8591 static inline uint32_t
   8592 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8593 {
   8594 	struct wm_softc *sc = rxq->rxq_sc;
   8595 
   8596 	if (sc->sc_type == WM_T_82574)
   8597 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8598 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8599 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8600 	else
   8601 		return 0;
   8602 }
   8603 
   8604 static inline uint8_t
   8605 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8606 {
   8607 	struct wm_softc *sc = rxq->rxq_sc;
   8608 
   8609 	if (sc->sc_type == WM_T_82574)
   8610 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8611 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8612 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8613 	else
   8614 		return 0;
   8615 }
   8616 #endif /* WM_DEBUG */
   8617 
   8618 static inline bool
   8619 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8620     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8621 {
   8622 
   8623 	if (sc->sc_type == WM_T_82574)
   8624 		return (status & ext_bit) != 0;
   8625 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8626 		return (status & nq_bit) != 0;
   8627 	else
   8628 		return (status & legacy_bit) != 0;
   8629 }
   8630 
   8631 static inline bool
   8632 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8633     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8634 {
   8635 
   8636 	if (sc->sc_type == WM_T_82574)
   8637 		return (error & ext_bit) != 0;
   8638 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8639 		return (error & nq_bit) != 0;
   8640 	else
   8641 		return (error & legacy_bit) != 0;
   8642 }
   8643 
   8644 static inline bool
   8645 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8646 {
   8647 
   8648 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8649 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8650 		return true;
   8651 	else
   8652 		return false;
   8653 }
   8654 
   8655 static inline bool
   8656 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8657 {
   8658 	struct wm_softc *sc = rxq->rxq_sc;
   8659 
    8660 	/* XXX Is there a missing error bit for newqueue? */
   8661 	if (wm_rxdesc_is_set_error(sc, errors,
   8662 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8663 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8664 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8665 		NQRXC_ERROR_RXE)) {
   8666 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8667 		    EXTRXC_ERROR_SE, 0))
   8668 			log(LOG_WARNING, "%s: symbol error\n",
   8669 			    device_xname(sc->sc_dev));
   8670 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8671 		    EXTRXC_ERROR_SEQ, 0))
   8672 			log(LOG_WARNING, "%s: receive sequence error\n",
   8673 			    device_xname(sc->sc_dev));
   8674 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8675 		    EXTRXC_ERROR_CE, 0))
   8676 			log(LOG_WARNING, "%s: CRC error\n",
   8677 			    device_xname(sc->sc_dev));
   8678 		return true;
   8679 	}
   8680 
   8681 	return false;
   8682 }
   8683 
   8684 static inline bool
   8685 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8686 {
   8687 	struct wm_softc *sc = rxq->rxq_sc;
   8688 
   8689 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8690 		NQRXC_STATUS_DD)) {
   8691 		/* We have processed all of the receive descriptors. */
   8692 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8693 		return false;
   8694 	}
   8695 
   8696 	return true;
   8697 }
   8698 
   8699 static inline bool
   8700 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8701     uint16_t vlantag, struct mbuf *m)
   8702 {
   8703 
   8704 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8705 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8706 		vlan_set_tag(m, le16toh(vlantag));
   8707 	}
   8708 
   8709 	return true;
   8710 }
   8711 
   8712 static inline void
   8713 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8714     uint32_t errors, struct mbuf *m)
   8715 {
   8716 	struct wm_softc *sc = rxq->rxq_sc;
   8717 
   8718 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8719 		if (wm_rxdesc_is_set_status(sc, status,
   8720 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8721 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8722 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8723 			if (wm_rxdesc_is_set_error(sc, errors,
   8724 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8725 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8726 		}
   8727 		if (wm_rxdesc_is_set_status(sc, status,
   8728 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8729 			/*
   8730 			 * Note: we don't know if this was TCP or UDP,
   8731 			 * so we just set both bits, and expect the
   8732 			 * upper layers to deal.
   8733 			 */
   8734 			WM_Q_EVCNT_INCR(rxq, tusum);
   8735 			m->m_pkthdr.csum_flags |=
   8736 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8737 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8738 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8739 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8740 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8741 		}
   8742 	}
   8743 }
   8744 
   8745 /*
   8746  * wm_rxeof:
   8747  *
   8748  *	Helper; handle receive interrupts.
   8749  */
   8750 static bool
   8751 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8752 {
   8753 	struct wm_softc *sc = rxq->rxq_sc;
   8754 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8755 	struct wm_rxsoft *rxs;
   8756 	struct mbuf *m;
   8757 	int i, len;
   8758 	int count = 0;
   8759 	uint32_t status, errors;
   8760 	uint16_t vlantag;
   8761 	bool more = false;
   8762 
   8763 	KASSERT(mutex_owned(rxq->rxq_lock));
   8764 
   8765 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8766 		if (limit-- == 0) {
   8767 			rxq->rxq_ptr = i;
   8768 			more = true;
   8769 			DPRINTF(WM_DEBUG_RX,
   8770 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8771 				device_xname(sc->sc_dev), i));
   8772 			break;
   8773 		}
   8774 
   8775 		rxs = &rxq->rxq_soft[i];
   8776 
   8777 		DPRINTF(WM_DEBUG_RX,
   8778 		    ("%s: RX: checking descriptor %d\n",
   8779 			device_xname(sc->sc_dev), i));
   8780 		wm_cdrxsync(rxq, i,
   8781 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8782 
   8783 		status = wm_rxdesc_get_status(rxq, i);
   8784 		errors = wm_rxdesc_get_errors(rxq, i);
   8785 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8786 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8787 #ifdef WM_DEBUG
   8788 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8789 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8790 #endif
   8791 
   8792 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8793 			/*
    8794 			 * Update the receive pointer while holding rxq_lock,
    8795 			 * keeping it consistent with the counter increments.
   8796 			 */
   8797 			rxq->rxq_ptr = i;
   8798 			break;
   8799 		}
   8800 
   8801 		count++;
   8802 		if (__predict_false(rxq->rxq_discard)) {
   8803 			DPRINTF(WM_DEBUG_RX,
   8804 			    ("%s: RX: discarding contents of descriptor %d\n",
   8805 				device_xname(sc->sc_dev), i));
   8806 			wm_init_rxdesc(rxq, i);
   8807 			if (wm_rxdesc_is_eop(rxq, status)) {
   8808 				/* Reset our state. */
   8809 				DPRINTF(WM_DEBUG_RX,
   8810 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8811 					device_xname(sc->sc_dev)));
   8812 				rxq->rxq_discard = 0;
   8813 			}
   8814 			continue;
   8815 		}
   8816 
   8817 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8818 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8819 
   8820 		m = rxs->rxs_mbuf;
   8821 
   8822 		/*
   8823 		 * Add a new receive buffer to the ring, unless of
   8824 		 * course the length is zero. Treat the latter as a
   8825 		 * failed mapping.
   8826 		 */
   8827 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8828 			/*
   8829 			 * Failed, throw away what we've done so
   8830 			 * far, and discard the rest of the packet.
   8831 			 */
   8832 			ifp->if_ierrors++;
   8833 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8834 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8835 			wm_init_rxdesc(rxq, i);
   8836 			if (!wm_rxdesc_is_eop(rxq, status))
   8837 				rxq->rxq_discard = 1;
   8838 			if (rxq->rxq_head != NULL)
   8839 				m_freem(rxq->rxq_head);
   8840 			WM_RXCHAIN_RESET(rxq);
   8841 			DPRINTF(WM_DEBUG_RX,
   8842 			    ("%s: RX: Rx buffer allocation failed, "
   8843 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8844 				rxq->rxq_discard ? " (discard)" : ""));
   8845 			continue;
   8846 		}
   8847 
   8848 		m->m_len = len;
   8849 		rxq->rxq_len += len;
   8850 		DPRINTF(WM_DEBUG_RX,
   8851 		    ("%s: RX: buffer at %p len %d\n",
   8852 			device_xname(sc->sc_dev), m->m_data, len));
   8853 
   8854 		/* If this is not the end of the packet, keep looking. */
   8855 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8856 			WM_RXCHAIN_LINK(rxq, m);
   8857 			DPRINTF(WM_DEBUG_RX,
   8858 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8859 				device_xname(sc->sc_dev), rxq->rxq_len));
   8860 			continue;
   8861 		}
   8862 
   8863 		/*
    8864 		 * Okay, we have the entire packet now. The chip is
    8865 		 * configured to include the FCS except on the I350, I354
    8866 		 * and I21[01] (not all chips can be configured to strip
    8867 		 * it), so we need to trim it.
    8868 		 * We may need to adjust the length of the previous mbuf in
    8869 		 * the chain if the current mbuf is too short.
    8870 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8871 		 * is always set on the I350, so we don't trim there.
   8872 		 */
   8873 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8874 		    && (sc->sc_type != WM_T_I210)
   8875 		    && (sc->sc_type != WM_T_I211)) {
   8876 			if (m->m_len < ETHER_CRC_LEN) {
   8877 				rxq->rxq_tail->m_len
   8878 				    -= (ETHER_CRC_LEN - m->m_len);
   8879 				m->m_len = 0;
   8880 			} else
   8881 				m->m_len -= ETHER_CRC_LEN;
   8882 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8883 		} else
   8884 			len = rxq->rxq_len;
   8885 
   8886 		WM_RXCHAIN_LINK(rxq, m);
   8887 
   8888 		*rxq->rxq_tailp = NULL;
   8889 		m = rxq->rxq_head;
   8890 
   8891 		WM_RXCHAIN_RESET(rxq);
   8892 
   8893 		DPRINTF(WM_DEBUG_RX,
   8894 		    ("%s: RX: have entire packet, len -> %d\n",
   8895 			device_xname(sc->sc_dev), len));
   8896 
   8897 		/* If an error occurred, update stats and drop the packet. */
   8898 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8899 			m_freem(m);
   8900 			continue;
   8901 		}
   8902 
   8903 		/* No errors.  Receive the packet. */
   8904 		m_set_rcvif(m, ifp);
   8905 		m->m_pkthdr.len = len;
   8906 		/*
   8907 		 * TODO
    8908 		 * We should save the rsshash and rsstype in this mbuf.
   8909 		 */
   8910 		DPRINTF(WM_DEBUG_RX,
   8911 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8912 			device_xname(sc->sc_dev), rsstype, rsshash));
   8913 
   8914 		/*
   8915 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8916 		 * for us.  Associate the tag with the packet.
   8917 		 */
   8918 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8919 			continue;
   8920 
   8921 		/* Set up checksum info for this packet. */
   8922 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8923 		/*
    8924 		 * Update the receive pointer while holding rxq_lock,
    8925 		 * keeping it consistent with the counter increments.
   8926 		 */
   8927 		rxq->rxq_ptr = i;
   8928 		rxq->rxq_packets++;
   8929 		rxq->rxq_bytes += len;
   8930 		mutex_exit(rxq->rxq_lock);
   8931 
   8932 		/* Pass it on. */
   8933 		if_percpuq_enqueue(sc->sc_ipq, m);
   8934 
   8935 		mutex_enter(rxq->rxq_lock);
   8936 
   8937 		if (rxq->rxq_stopping)
   8938 			break;
   8939 	}
   8940 
   8941 	if (count != 0)
   8942 		rnd_add_uint32(&sc->rnd_source, count);
   8943 
   8944 	DPRINTF(WM_DEBUG_RX,
   8945 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8946 
   8947 	return more;
   8948 }
   8949 
   8950 /*
   8951  * wm_linkintr_gmii:
   8952  *
   8953  *	Helper; handle link interrupts for GMII.
   8954  */
   8955 static void
   8956 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8957 {
   8958 
   8959 	KASSERT(WM_CORE_LOCKED(sc));
   8960 
   8961 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8962 		__func__));
   8963 
   8964 	if (icr & ICR_LSC) {
   8965 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8966 		uint32_t reg;
   8967 		bool link;
   8968 
   8969 		link = status & STATUS_LU;
   8970 		if (link) {
   8971 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8972 				device_xname(sc->sc_dev),
   8973 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8974 		} else {
   8975 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8976 				device_xname(sc->sc_dev)));
   8977 		}
   8978 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8979 			wm_gig_downshift_workaround_ich8lan(sc);
   8980 
   8981 		if ((sc->sc_type == WM_T_ICH8)
   8982 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8983 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8984 		}
   8985 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8986 			device_xname(sc->sc_dev)));
   8987 		mii_pollstat(&sc->sc_mii);
   8988 		if (sc->sc_type == WM_T_82543) {
   8989 			int miistatus, active;
   8990 
   8991 			/*
    8992 			 * With the 82543, we need to force the MAC's
    8993 			 * speed and duplex to match the PHY's speed
    8994 			 * and duplex configuration.
   8995 			 */
   8996 			miistatus = sc->sc_mii.mii_media_status;
   8997 
   8998 			if (miistatus & IFM_ACTIVE) {
   8999 				active = sc->sc_mii.mii_media_active;
   9000 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9001 				switch (IFM_SUBTYPE(active)) {
   9002 				case IFM_10_T:
   9003 					sc->sc_ctrl |= CTRL_SPEED_10;
   9004 					break;
   9005 				case IFM_100_TX:
   9006 					sc->sc_ctrl |= CTRL_SPEED_100;
   9007 					break;
   9008 				case IFM_1000_T:
   9009 					sc->sc_ctrl |= CTRL_SPEED_1000;
   9010 					break;
   9011 				default:
   9012 					/*
    9013 					 * Fiber?
    9014 					 * Should not get here.
   9015 					 */
   9016 					printf("unknown media (%x)\n", active);
   9017 					break;
   9018 				}
   9019 				if (active & IFM_FDX)
   9020 					sc->sc_ctrl |= CTRL_FD;
   9021 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9022 			}
   9023 		} else if (sc->sc_type == WM_T_PCH) {
   9024 			wm_k1_gig_workaround_hv(sc,
   9025 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9026 		}
   9027 
   9028 		/*
   9029 		 * I217 Packet Loss issue:
   9030 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9031 		 * on power up.
   9032 		 * Set the Beacon Duration for I217 to 8 usec
   9033 		 */
   9034 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9035 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9036 			reg &= ~FEXTNVM4_BEACON_DURATION;
   9037 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   9038 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9039 		}
   9040 
   9041 		/* Work-around I218 hang issue */
   9042 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9043 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9044 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9045 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9046 			wm_k1_workaround_lpt_lp(sc, link);
   9047 
   9048 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9049 			/*
   9050 			 * Set platform power management values for Latency
   9051 			 * Tolerance Reporting (LTR)
   9052 			 */
   9053 			wm_platform_pm_pch_lpt(sc,
   9054 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9055 		}
   9056 
   9057 		/* Clear link partner's EEE ability */
   9058 		sc->eee_lp_ability = 0;
   9059 
   9060 		/* FEXTNVM6 K1-off workaround */
   9061 		if (sc->sc_type == WM_T_PCH_SPT) {
   9062 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9063 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   9064 			    & FEXTNVM6_K1_OFF_ENABLE)
   9065 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   9066 			else
   9067 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9068 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9069 		}
   9070 
   9071 		if (!link)
   9072 			return;
   9073 
   9074 		switch (sc->sc_type) {
   9075 		case WM_T_PCH2:
   9076 			wm_k1_workaround_lv(sc);
   9077 			/* FALLTHROUGH */
   9078 		case WM_T_PCH:
   9079 			if (sc->sc_phytype == WMPHY_82578)
   9080 				wm_link_stall_workaround_hv(sc);
   9081 			break;
   9082 		default:
   9083 			break;
   9084 		}
   9085 
   9086 		/* Enable/Disable EEE after link up */
   9087 		if (sc->sc_phytype > WMPHY_82579)
   9088 			wm_set_eee_pchlan(sc);
   9089 
   9090 	} else if (icr & ICR_RXSEQ) {
   9091 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   9092 			device_xname(sc->sc_dev)));
   9093 	}
   9094 }
   9095 
   9096 /*
   9097  * wm_linkintr_tbi:
   9098  *
   9099  *	Helper; handle link interrupts for TBI mode.
   9100  */
   9101 static void
   9102 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9103 {
   9104 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9105 	uint32_t status;
   9106 
   9107 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9108 		__func__));
   9109 
   9110 	status = CSR_READ(sc, WMREG_STATUS);
   9111 	if (icr & ICR_LSC) {
   9112 		wm_check_for_link(sc);
   9113 		if (status & STATUS_LU) {
   9114 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9115 				device_xname(sc->sc_dev),
   9116 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9117 			/*
   9118 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9119 			 * so we should update sc->sc_ctrl
   9120 			 */
   9121 
   9122 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9123 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9124 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9125 			if (status & STATUS_FD)
   9126 				sc->sc_tctl |=
   9127 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9128 			else
   9129 				sc->sc_tctl |=
   9130 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9131 			if (sc->sc_ctrl & CTRL_TFCE)
   9132 				sc->sc_fcrtl |= FCRTL_XONE;
   9133 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9134 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9135 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9136 			sc->sc_tbi_linkup = 1;
   9137 			if_link_state_change(ifp, LINK_STATE_UP);
   9138 		} else {
   9139 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9140 				device_xname(sc->sc_dev)));
   9141 			sc->sc_tbi_linkup = 0;
   9142 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9143 		}
   9144 		/* Update LED */
   9145 		wm_tbi_serdes_set_linkled(sc);
   9146 	} else if (icr & ICR_RXSEQ) {
   9147 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9148 			device_xname(sc->sc_dev)));
   9149 	}
   9150 }
   9151 
   9152 /*
   9153  * wm_linkintr_serdes:
   9154  *
    9155  *	Helper; handle link interrupts for SERDES mode.
   9156  */
   9157 static void
   9158 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9159 {
   9160 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9161 	struct mii_data *mii = &sc->sc_mii;
   9162 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9163 	uint32_t pcs_adv, pcs_lpab, reg;
   9164 
   9165 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9166 		__func__));
   9167 
   9168 	if (icr & ICR_LSC) {
   9169 		/* Check PCS */
   9170 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9171 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9172 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9173 				device_xname(sc->sc_dev)));
   9174 			mii->mii_media_status |= IFM_ACTIVE;
   9175 			sc->sc_tbi_linkup = 1;
   9176 			if_link_state_change(ifp, LINK_STATE_UP);
   9177 		} else {
   9178 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9179 				device_xname(sc->sc_dev)));
    9180 			mii->mii_media_active |= IFM_NONE;
   9181 			sc->sc_tbi_linkup = 0;
   9182 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9183 			wm_tbi_serdes_set_linkled(sc);
   9184 			return;
   9185 		}
   9186 		mii->mii_media_active |= IFM_1000_SX;
   9187 		if ((reg & PCS_LSTS_FDX) != 0)
   9188 			mii->mii_media_active |= IFM_FDX;
   9189 		else
   9190 			mii->mii_media_active |= IFM_HDX;
   9191 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9192 			/* Check flow */
   9193 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9194 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9195 				DPRINTF(WM_DEBUG_LINK,
   9196 				    ("XXX LINKOK but not ACOMP\n"));
   9197 				return;
   9198 			}
   9199 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9200 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9201 			DPRINTF(WM_DEBUG_LINK,
   9202 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9203 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9204 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9205 				mii->mii_media_active |= IFM_FLOW
   9206 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9207 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9208 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9209 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9210 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9211 				mii->mii_media_active |= IFM_FLOW
   9212 				    | IFM_ETH_TXPAUSE;
   9213 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9214 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9215 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9216 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9217 				mii->mii_media_active |= IFM_FLOW
   9218 				    | IFM_ETH_RXPAUSE;
   9219 		}
   9220 		/* Update LED */
   9221 		wm_tbi_serdes_set_linkled(sc);
   9222 	} else {
   9223 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9224 		    device_xname(sc->sc_dev)));
   9225 	}
   9226 }
   9227 
   9228 /*
   9229  * wm_linkintr:
   9230  *
   9231  *	Helper; handle link interrupts.
   9232  */
   9233 static void
   9234 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9235 {
   9236 
   9237 	KASSERT(WM_CORE_LOCKED(sc));
   9238 
   9239 	if (sc->sc_flags & WM_F_HAS_MII)
   9240 		wm_linkintr_gmii(sc, icr);
   9241 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9242 	    && (sc->sc_type >= WM_T_82575))
   9243 		wm_linkintr_serdes(sc, icr);
   9244 	else
   9245 		wm_linkintr_tbi(sc, icr);
   9246 }
   9247 
   9248 /*
   9249  * wm_intr_legacy:
   9250  *
   9251  *	Interrupt service routine for INTx and MSI.
   9252  */
   9253 static int
   9254 wm_intr_legacy(void *arg)
   9255 {
   9256 	struct wm_softc *sc = arg;
   9257 	struct wm_queue *wmq = &sc->sc_queue[0];
   9258 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9259 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9260 	uint32_t icr, rndval = 0;
   9261 	int handled = 0;
   9262 
   9263 	while (1 /* CONSTCOND */) {
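         		/* Reading ICR also acknowledges (clears) the causes. */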
   9264 		icr = CSR_READ(sc, WMREG_ICR);
   9265 		if ((icr & sc->sc_icr) == 0)
   9266 			break;
   9267 		if (handled == 0) {
   9268 			DPRINTF(WM_DEBUG_TX,
    9269 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9270 		}
   9271 		if (rndval == 0)
   9272 			rndval = icr;
   9273 
   9274 		mutex_enter(rxq->rxq_lock);
   9275 
   9276 		if (rxq->rxq_stopping) {
   9277 			mutex_exit(rxq->rxq_lock);
   9278 			break;
   9279 		}
   9280 
   9281 		handled = 1;
   9282 
   9283 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9284 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9285 			DPRINTF(WM_DEBUG_RX,
   9286 			    ("%s: RX: got Rx intr 0x%08x\n",
   9287 				device_xname(sc->sc_dev),
   9288 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9289 			WM_Q_EVCNT_INCR(rxq, intr);
   9290 		}
   9291 #endif
   9292 		/*
   9293 		 * wm_rxeof() does *not* call upper layer functions directly,
    9294 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9295 		 * So, we can call wm_rxeof() in interrupt context.
   9296 		 */
   9297 		wm_rxeof(rxq, UINT_MAX);
   9298 
   9299 		mutex_exit(rxq->rxq_lock);
   9300 		mutex_enter(txq->txq_lock);
   9301 
   9302 		if (txq->txq_stopping) {
   9303 			mutex_exit(txq->txq_lock);
   9304 			break;
   9305 		}
   9306 
   9307 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9308 		if (icr & ICR_TXDW) {
   9309 			DPRINTF(WM_DEBUG_TX,
   9310 			    ("%s: TX: got TXDW interrupt\n",
   9311 				device_xname(sc->sc_dev)));
   9312 			WM_Q_EVCNT_INCR(txq, txdw);
   9313 		}
   9314 #endif
   9315 		wm_txeof(txq, UINT_MAX);
   9316 
   9317 		mutex_exit(txq->txq_lock);
   9318 		WM_CORE_LOCK(sc);
   9319 
   9320 		if (sc->sc_core_stopping) {
   9321 			WM_CORE_UNLOCK(sc);
   9322 			break;
   9323 		}
   9324 
   9325 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9326 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9327 			wm_linkintr(sc, icr);
   9328 		}
   9329 
   9330 		WM_CORE_UNLOCK(sc);
   9331 
   9332 		if (icr & ICR_RXO) {
   9333 #if defined(WM_DEBUG)
   9334 			log(LOG_WARNING, "%s: Receive overrun\n",
   9335 			    device_xname(sc->sc_dev));
   9336 #endif /* defined(WM_DEBUG) */
   9337 		}
   9338 	}
   9339 
   9340 	rnd_add_uint32(&sc->rnd_source, rndval);
   9341 
   9342 	if (handled) {
   9343 		/* Try to get more packets going. */
   9344 		softint_schedule(wmq->wmq_si);
   9345 	}
   9346 
   9347 	return handled;
   9348 }
   9349 
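         /*
          * wm_txrxintr_disable:
          *
          *	Mask the Tx/Rx interrupts of one queue pair: the 82574 uses
          *	the per-queue ICR bits in IMC, the 82575 uses the EITR queue
          *	bits in EIMC, and newer multiqueue devices use the per-vector
          *	EIMC bits.
          */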
   9350 static inline void
   9351 wm_txrxintr_disable(struct wm_queue *wmq)
   9352 {
   9353 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9354 
   9355 	if (sc->sc_type == WM_T_82574)
   9356 		CSR_WRITE(sc, WMREG_IMC,
   9357 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9358 	else if (sc->sc_type == WM_T_82575)
   9359 		CSR_WRITE(sc, WMREG_EIMC,
   9360 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9361 	else
   9362 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9363 }
   9364 
   9365 static inline void
   9366 wm_txrxintr_enable(struct wm_queue *wmq)
   9367 {
   9368 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9369 
   9370 	wm_itrs_calculate(sc, wmq);
   9371 
   9372 	/*
    9373 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9374 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9375 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9376 	 * its wm_handle_queue(wmq) is running.
   9377 	 */
   9378 	if (sc->sc_type == WM_T_82574)
   9379 		CSR_WRITE(sc, WMREG_IMS,
   9380 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9381 	else if (sc->sc_type == WM_T_82575)
   9382 		CSR_WRITE(sc, WMREG_EIMS,
   9383 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9384 	else
   9385 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9386 }
   9387 
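         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx interrupts of one
          *	queue pair for MSI-X.
          */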
   9388 static int
   9389 wm_txrxintr_msix(void *arg)
   9390 {
   9391 	struct wm_queue *wmq = arg;
   9392 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9393 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9394 	struct wm_softc *sc = txq->txq_sc;
   9395 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9396 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9397 	bool txmore;
   9398 	bool rxmore;
   9399 
   9400 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9401 
   9402 	DPRINTF(WM_DEBUG_TX,
   9403 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9404 
   9405 	wm_txrxintr_disable(wmq);
   9406 
   9407 	mutex_enter(txq->txq_lock);
   9408 
   9409 	if (txq->txq_stopping) {
   9410 		mutex_exit(txq->txq_lock);
   9411 		return 0;
   9412 	}
   9413 
   9414 	WM_Q_EVCNT_INCR(txq, txdw);
   9415 	txmore = wm_txeof(txq, txlimit);
   9416 	/* wm_deferred start() is done in wm_handle_queue(). */
   9417 	mutex_exit(txq->txq_lock);
   9418 
   9419 	DPRINTF(WM_DEBUG_RX,
   9420 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9421 	mutex_enter(rxq->rxq_lock);
   9422 
   9423 	if (rxq->rxq_stopping) {
   9424 		mutex_exit(rxq->rxq_lock);
   9425 		return 0;
   9426 	}
   9427 
   9428 	WM_Q_EVCNT_INCR(rxq, intr);
   9429 	rxmore = wm_rxeof(rxq, rxlimit);
   9430 	mutex_exit(rxq->rxq_lock);
   9431 
   9432 	wm_itrs_writereg(sc, wmq);
   9433 
   9434 	if (txmore || rxmore)
   9435 		softint_schedule(wmq->wmq_si);
   9436 	else
   9437 		wm_txrxintr_enable(wmq);
   9438 
   9439 	return 1;
   9440 }
   9441 
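         /*
          * wm_handle_queue:
          *
          *	Softint handler; process the Tx/Rx work which
          *	wm_txrxintr_msix() left over because of its process limits,
          *	restart deferred transmission, and re-enable the queue
          *	interrupts when nothing is left to do.
          */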
   9442 static void
   9443 wm_handle_queue(void *arg)
   9444 {
   9445 	struct wm_queue *wmq = arg;
   9446 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9447 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9448 	struct wm_softc *sc = txq->txq_sc;
   9449 	u_int txlimit = sc->sc_tx_process_limit;
   9450 	u_int rxlimit = sc->sc_rx_process_limit;
   9451 	bool txmore;
   9452 	bool rxmore;
   9453 
   9454 	mutex_enter(txq->txq_lock);
   9455 	if (txq->txq_stopping) {
   9456 		mutex_exit(txq->txq_lock);
   9457 		return;
   9458 	}
   9459 	txmore = wm_txeof(txq, txlimit);
   9460 	wm_deferred_start_locked(txq);
   9461 	mutex_exit(txq->txq_lock);
   9462 
   9463 	mutex_enter(rxq->rxq_lock);
   9464 	if (rxq->rxq_stopping) {
   9465 		mutex_exit(rxq->rxq_lock);
   9466 		return;
   9467 	}
   9468 	WM_Q_EVCNT_INCR(rxq, defer);
   9469 	rxmore = wm_rxeof(rxq, rxlimit);
   9470 	mutex_exit(rxq->rxq_lock);
   9471 
   9472 	if (txmore || rxmore)
   9473 		softint_schedule(wmq->wmq_si);
   9474 	else
   9475 		wm_txrxintr_enable(wmq);
   9476 }
   9477 
   9478 /*
   9479  * wm_linkintr_msix:
   9480  *
   9481  *	Interrupt service routine for link status change for MSI-X.
   9482  */
   9483 static int
   9484 wm_linkintr_msix(void *arg)
   9485 {
   9486 	struct wm_softc *sc = arg;
   9487 	uint32_t reg;
   9488 	bool has_rxo;
   9489 
   9490 	DPRINTF(WM_DEBUG_LINK,
   9491 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9492 
   9493 	reg = CSR_READ(sc, WMREG_ICR);
   9494 	WM_CORE_LOCK(sc);
   9495 	if (sc->sc_core_stopping)
   9496 		goto out;
   9497 
   9498 	if ((reg & ICR_LSC) != 0) {
   9499 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9500 		wm_linkintr(sc, ICR_LSC);
   9501 	}
   9502 
   9503 	/*
   9504 	 * XXX 82574 MSI-X mode workaround
   9505 	 *
    9506 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9507 	 * MSI-X vector; it raises neither the ICR_RXQ(0) nor the ICR_RXQ(1)
    9508 	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by
    9509 	 * writing to WMREG_ICS, to process the received packets.
   9510 	 */
   9511 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9512 #if defined(WM_DEBUG)
   9513 		log(LOG_WARNING, "%s: Receive overrun\n",
   9514 		    device_xname(sc->sc_dev));
   9515 #endif /* defined(WM_DEBUG) */
   9516 
   9517 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate under heavy
		 * receive traffic, so we service ICR_OTHER in polling mode
		 * just like the Tx/Rx interrupts. ICR_OTHER is re-enabled at
		 * the end of wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   9525 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9526 
   9527 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9528 	}
	}

   9532 out:
   9533 	WM_CORE_UNLOCK(sc);
   9534 
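	/*
	 * Re-enable the link/other interrupt. On the 82574, keep ICR_OTHER
	 * masked while an RXO storm is being handled in polling mode (see
	 * the workaround above); wm_txrxintr_msix() re-enables it later.
	 */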
   9535 	if (sc->sc_type == WM_T_82574) {
   9536 		if (!has_rxo)
   9537 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9538 		else
   9539 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9540 	} else if (sc->sc_type == WM_T_82575)
   9541 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9542 	else
   9543 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9544 
   9545 	return 1;
   9546 }
   9547 
   9548 /*
   9549  * Media related.
   9550  * GMII, SGMII, TBI (and SERDES)
   9551  */
   9552 
   9553 /* Common */
   9554 
   9555 /*
   9556  * wm_tbi_serdes_set_linkled:
   9557  *
   9558  *	Update the link LED on TBI and SERDES devices.
   9559  */
   9560 static void
   9561 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9562 {
   9563 
   9564 	if (sc->sc_tbi_linkup)
   9565 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9566 	else
   9567 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9568 
   9569 	/* 82540 or newer devices are active low */
   9570 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9571 
   9572 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9573 }
   9574 
   9575 /* GMII related */
   9576 
   9577 /*
   9578  * wm_gmii_reset:
   9579  *
   9580  *	Reset the PHY.
   9581  */
   9582 static void
   9583 wm_gmii_reset(struct wm_softc *sc)
   9584 {
   9585 	uint32_t reg;
   9586 	int rv;
   9587 
   9588 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9589 		device_xname(sc->sc_dev), __func__));
   9590 
   9591 	rv = sc->phy.acquire(sc);
   9592 	if (rv != 0) {
   9593 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9594 		    __func__);
   9595 		return;
   9596 	}
   9597 
   9598 	switch (sc->sc_type) {
   9599 	case WM_T_82542_2_0:
   9600 	case WM_T_82542_2_1:
   9601 		/* null */
   9602 		break;
   9603 	case WM_T_82543:
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's configuration. In
		 * addition, we need to toggle the PHY's hardware reset pin
		 * to take the PHY out of reset.
		 */
   9610 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9611 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9612 
   9613 		/* The PHY reset pin is active-low. */
   9614 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9615 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9616 		    CTRL_EXT_SWDPIN(4));
   9617 		reg |= CTRL_EXT_SWDPIO(4);
   9618 
   9619 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9620 		CSR_WRITE_FLUSH(sc);
   9621 		delay(10*1000);
   9622 
   9623 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9624 		CSR_WRITE_FLUSH(sc);
   9625 		delay(150);
   9626 #if 0
   9627 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9628 #endif
   9629 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9630 		break;
   9631 	case WM_T_82544:	/* reset 10000us */
   9632 	case WM_T_82540:
   9633 	case WM_T_82545:
   9634 	case WM_T_82545_3:
   9635 	case WM_T_82546:
   9636 	case WM_T_82546_3:
   9637 	case WM_T_82541:
   9638 	case WM_T_82541_2:
   9639 	case WM_T_82547:
   9640 	case WM_T_82547_2:
   9641 	case WM_T_82571:	/* reset 100us */
   9642 	case WM_T_82572:
   9643 	case WM_T_82573:
   9644 	case WM_T_82574:
   9645 	case WM_T_82575:
   9646 	case WM_T_82576:
   9647 	case WM_T_82580:
   9648 	case WM_T_I350:
   9649 	case WM_T_I354:
   9650 	case WM_T_I210:
   9651 	case WM_T_I211:
   9652 	case WM_T_82583:
   9653 	case WM_T_80003:
   9654 		/* generic reset */
   9655 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9656 		CSR_WRITE_FLUSH(sc);
   9657 		delay(20000);
   9658 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9659 		CSR_WRITE_FLUSH(sc);
   9660 		delay(20000);
   9661 
   9662 		if ((sc->sc_type == WM_T_82541)
   9663 		    || (sc->sc_type == WM_T_82541_2)
   9664 		    || (sc->sc_type == WM_T_82547)
   9665 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
			/* XXX Add code to set the LED after PHY reset */
   9668 		}
   9669 		break;
   9670 	case WM_T_ICH8:
   9671 	case WM_T_ICH9:
   9672 	case WM_T_ICH10:
   9673 	case WM_T_PCH:
   9674 	case WM_T_PCH2:
   9675 	case WM_T_PCH_LPT:
   9676 	case WM_T_PCH_SPT:
   9677 	case WM_T_PCH_CNP:
   9678 		/* generic reset */
   9679 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9680 		CSR_WRITE_FLUSH(sc);
   9681 		delay(100);
   9682 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9683 		CSR_WRITE_FLUSH(sc);
   9684 		delay(150);
   9685 		break;
   9686 	default:
   9687 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9688 		    __func__);
   9689 		break;
   9690 	}
   9691 
   9692 	sc->phy.release(sc);
   9693 
   9694 	/* get_cfg_done */
   9695 	wm_get_cfg_done(sc);
   9696 
   9697 	/* extra setup */
   9698 	switch (sc->sc_type) {
   9699 	case WM_T_82542_2_0:
   9700 	case WM_T_82542_2_1:
   9701 	case WM_T_82543:
   9702 	case WM_T_82544:
   9703 	case WM_T_82540:
   9704 	case WM_T_82545:
   9705 	case WM_T_82545_3:
   9706 	case WM_T_82546:
   9707 	case WM_T_82546_3:
   9708 	case WM_T_82541_2:
   9709 	case WM_T_82547_2:
   9710 	case WM_T_82571:
   9711 	case WM_T_82572:
   9712 	case WM_T_82573:
   9713 	case WM_T_82574:
   9714 	case WM_T_82583:
   9715 	case WM_T_82575:
   9716 	case WM_T_82576:
   9717 	case WM_T_82580:
   9718 	case WM_T_I350:
   9719 	case WM_T_I354:
   9720 	case WM_T_I210:
   9721 	case WM_T_I211:
   9722 	case WM_T_80003:
   9723 		/* null */
   9724 		break;
   9725 	case WM_T_82541:
   9726 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9728 		break;
   9729 	case WM_T_ICH8:
   9730 	case WM_T_ICH9:
   9731 	case WM_T_ICH10:
   9732 	case WM_T_PCH:
   9733 	case WM_T_PCH2:
   9734 	case WM_T_PCH_LPT:
   9735 	case WM_T_PCH_SPT:
   9736 	case WM_T_PCH_CNP:
   9737 		wm_phy_post_reset(sc);
   9738 		break;
   9739 	default:
   9740 		panic("%s: unknown type\n", __func__);
   9741 		break;
   9742 	}
   9743 }
   9744 
/*
 * Setup sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since
 * the PHY registers cannot be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The result might still not be perfect because some entries
 * may be missing from the comparison tables, but it is better than the
 * first call's guess.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9763 static void
   9764 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9765     uint16_t phy_model)
   9766 {
   9767 	device_t dev = sc->sc_dev;
   9768 	struct mii_data *mii = &sc->sc_mii;
   9769 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9770 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9771 	mii_readreg_t new_readreg;
   9772 	mii_writereg_t new_writereg;
   9773 
   9774 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9775 		device_xname(sc->sc_dev), __func__));
   9776 
   9777 	if (mii->mii_readreg == NULL) {
   9778 		/*
   9779 		 *  This is the first call of this function. For ICH and PCH
   9780 		 * variants, it's difficult to determine the PHY access method
   9781 		 * by sc_type, so use the PCI product ID for some devices.
   9782 		 */
   9783 
   9784 		switch (sc->sc_pcidevid) {
   9785 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9786 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9787 			/* 82577 */
   9788 			new_phytype = WMPHY_82577;
   9789 			break;
   9790 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9791 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9792 			/* 82578 */
   9793 			new_phytype = WMPHY_82578;
   9794 			break;
   9795 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9796 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9797 			/* 82579 */
   9798 			new_phytype = WMPHY_82579;
   9799 			break;
   9800 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9801 		case PCI_PRODUCT_INTEL_82801I_BM:
   9802 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9803 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9804 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9805 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9806 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9807 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9808 			/* ICH8, 9, 10 with 82567 */
   9809 			new_phytype = WMPHY_BM;
   9810 			break;
   9811 		default:
   9812 			break;
   9813 		}
   9814 	} else {
   9815 		/* It's not the first call. Use PHY OUI and model */
   9816 		switch (phy_oui) {
   9817 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9818 			switch (phy_model) {
   9819 			case 0x0004: /* XXX */
   9820 				new_phytype = WMPHY_82578;
   9821 				break;
   9822 			default:
   9823 				break;
   9824 			}
   9825 			break;
   9826 		case MII_OUI_xxMARVELL:
   9827 			switch (phy_model) {
   9828 			case MII_MODEL_xxMARVELL_I210:
   9829 				new_phytype = WMPHY_I210;
   9830 				break;
   9831 			case MII_MODEL_xxMARVELL_E1011:
   9832 			case MII_MODEL_xxMARVELL_E1000_3:
   9833 			case MII_MODEL_xxMARVELL_E1000_5:
   9834 			case MII_MODEL_xxMARVELL_E1112:
   9835 				new_phytype = WMPHY_M88;
   9836 				break;
   9837 			case MII_MODEL_xxMARVELL_E1149:
   9838 				new_phytype = WMPHY_BM;
   9839 				break;
   9840 			case MII_MODEL_xxMARVELL_E1111:
   9841 			case MII_MODEL_xxMARVELL_I347:
   9842 			case MII_MODEL_xxMARVELL_E1512:
   9843 			case MII_MODEL_xxMARVELL_E1340M:
   9844 			case MII_MODEL_xxMARVELL_E1543:
   9845 				new_phytype = WMPHY_M88;
   9846 				break;
   9847 			case MII_MODEL_xxMARVELL_I82563:
   9848 				new_phytype = WMPHY_GG82563;
   9849 				break;
   9850 			default:
   9851 				break;
   9852 			}
   9853 			break;
   9854 		case MII_OUI_INTEL:
   9855 			switch (phy_model) {
   9856 			case MII_MODEL_INTEL_I82577:
   9857 				new_phytype = WMPHY_82577;
   9858 				break;
   9859 			case MII_MODEL_INTEL_I82579:
   9860 				new_phytype = WMPHY_82579;
   9861 				break;
   9862 			case MII_MODEL_INTEL_I217:
   9863 				new_phytype = WMPHY_I217;
   9864 				break;
   9865 			case MII_MODEL_INTEL_I82580:
   9866 			case MII_MODEL_INTEL_I350:
   9867 				new_phytype = WMPHY_82580;
   9868 				break;
   9869 			default:
   9870 				break;
   9871 			}
   9872 			break;
   9873 		case MII_OUI_yyINTEL:
   9874 			switch (phy_model) {
   9875 			case MII_MODEL_yyINTEL_I82562G:
   9876 			case MII_MODEL_yyINTEL_I82562EM:
   9877 			case MII_MODEL_yyINTEL_I82562ET:
   9878 				new_phytype = WMPHY_IFE;
   9879 				break;
   9880 			case MII_MODEL_yyINTEL_IGP01E1000:
   9881 				new_phytype = WMPHY_IGP;
   9882 				break;
   9883 			case MII_MODEL_yyINTEL_I82566:
   9884 				new_phytype = WMPHY_IGP_3;
   9885 				break;
   9886 			default:
   9887 				break;
   9888 			}
   9889 			break;
   9890 		default:
   9891 			break;
   9892 		}
   9893 		if (new_phytype == WMPHY_UNKNOWN)
   9894 			aprint_verbose_dev(dev,
   9895 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9896 			    __func__, phy_oui, phy_model);
   9897 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev,
			    "Previously assumed PHY type (%u) was incorrect. "
			    "PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   9904 	}
   9905 
   9906 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9907 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9908 		/* SGMII */
   9909 		new_readreg = wm_sgmii_readreg;
   9910 		new_writereg = wm_sgmii_writereg;
   9911 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9912 		/* BM2 (phyaddr == 1) */
   9913 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9914 		    && (new_phytype != WMPHY_BM)
   9915 		    && (new_phytype != WMPHY_UNKNOWN))
   9916 			doubt_phytype = new_phytype;
   9917 		new_phytype = WMPHY_BM;
   9918 		new_readreg = wm_gmii_bm_readreg;
   9919 		new_writereg = wm_gmii_bm_writereg;
   9920 	} else if (sc->sc_type >= WM_T_PCH) {
   9921 		/* All PCH* use _hv_ */
   9922 		new_readreg = wm_gmii_hv_readreg;
   9923 		new_writereg = wm_gmii_hv_writereg;
   9924 	} else if (sc->sc_type >= WM_T_ICH8) {
   9925 		/* non-82567 ICH8, 9 and 10 */
   9926 		new_readreg = wm_gmii_i82544_readreg;
   9927 		new_writereg = wm_gmii_i82544_writereg;
   9928 	} else if (sc->sc_type >= WM_T_80003) {
   9929 		/* 80003 */
   9930 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9931 		    && (new_phytype != WMPHY_GG82563)
   9932 		    && (new_phytype != WMPHY_UNKNOWN))
   9933 			doubt_phytype = new_phytype;
   9934 		new_phytype = WMPHY_GG82563;
   9935 		new_readreg = wm_gmii_i80003_readreg;
   9936 		new_writereg = wm_gmii_i80003_writereg;
   9937 	} else if (sc->sc_type >= WM_T_I210) {
   9938 		/* I210 and I211 */
   9939 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9940 		    && (new_phytype != WMPHY_I210)
   9941 		    && (new_phytype != WMPHY_UNKNOWN))
   9942 			doubt_phytype = new_phytype;
   9943 		new_phytype = WMPHY_I210;
   9944 		new_readreg = wm_gmii_gs40g_readreg;
   9945 		new_writereg = wm_gmii_gs40g_writereg;
   9946 	} else if (sc->sc_type >= WM_T_82580) {
   9947 		/* 82580, I350 and I354 */
   9948 		new_readreg = wm_gmii_82580_readreg;
   9949 		new_writereg = wm_gmii_82580_writereg;
   9950 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9952 		new_readreg = wm_gmii_i82544_readreg;
   9953 		new_writereg = wm_gmii_i82544_writereg;
   9954 	} else {
   9955 		new_readreg = wm_gmii_i82543_readreg;
   9956 		new_writereg = wm_gmii_i82543_writereg;
   9957 	}
   9958 
   9959 	if (new_phytype == WMPHY_BM) {
   9960 		/* All BM use _bm_ */
   9961 		new_readreg = wm_gmii_bm_readreg;
   9962 		new_writereg = wm_gmii_bm_writereg;
   9963 	}
   9964 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9965 		/* All PCH* use _hv_ */
   9966 		new_readreg = wm_gmii_hv_readreg;
   9967 		new_writereg = wm_gmii_hv_writereg;
   9968 	}
   9969 
   9970 	/* Diag output */
   9971 	if (doubt_phytype != WMPHY_UNKNOWN)
   9972 		aprint_error_dev(dev, "Assumed new PHY type was "
   9973 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9974 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   9980 
   9981 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9982 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9983 
   9984 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9985 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9986 		    "function was incorrect.\n");
   9987 
   9988 	/* Update now */
   9989 	sc->sc_phytype = new_phytype;
   9990 	mii->mii_readreg = new_readreg;
   9991 	mii->mii_writereg = new_writereg;
   9992 	if (new_readreg == wm_gmii_hv_readreg) {
   9993 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9994 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9995 	} else if (new_readreg == wm_sgmii_readreg) {
   9996 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   9997 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   9998 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9999 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10000 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10001 	}
   10002 }
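
/*
 * Illustrative call pattern (a hedged sketch based on the header comment
 * above, not a verbatim call site):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	guess from PCI ID / MAC type
 *	mii_attach(...);			probe with the guessed accessors
 *	wm_gmii_setup_phytype(sc, oui, model);	refine from the probed PHY ID
 */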
   10003 
   10004 /*
   10005  * wm_get_phy_id_82575:
   10006  *
 * Return the PHY ID, or -1 on failure.
   10008  */
   10009 static int
   10010 wm_get_phy_id_82575(struct wm_softc *sc)
   10011 {
   10012 	uint32_t reg;
   10013 	int phyid = -1;
   10014 
   10015 	/* XXX */
   10016 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10017 		return -1;
   10018 
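	/*
	 * When the SGMII PHY is reached over MDIO, the PHY address comes
	 * from the MDIC register on the 82575/82576 and from MDICNFG on
	 * the 82580 and later.
	 */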
   10019 	if (wm_sgmii_uses_mdio(sc)) {
   10020 		switch (sc->sc_type) {
   10021 		case WM_T_82575:
   10022 		case WM_T_82576:
   10023 			reg = CSR_READ(sc, WMREG_MDIC);
   10024 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10025 			break;
   10026 		case WM_T_82580:
   10027 		case WM_T_I350:
   10028 		case WM_T_I354:
   10029 		case WM_T_I210:
   10030 		case WM_T_I211:
   10031 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10032 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10033 			break;
   10034 		default:
   10035 			return -1;
   10036 		}
   10037 	}
   10038 
   10039 	return phyid;
   10040 }
   10041 
   10042 
   10043 /*
   10044  * wm_gmii_mediainit:
   10045  *
   10046  *	Initialize media for use on 1000BASE-T devices.
   10047  */
   10048 static void
   10049 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10050 {
   10051 	device_t dev = sc->sc_dev;
   10052 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10053 	struct mii_data *mii = &sc->sc_mii;
   10054 	uint32_t reg;
   10055 
   10056 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10057 		device_xname(sc->sc_dev), __func__));
   10058 
   10059 	/* We have GMII. */
   10060 	sc->sc_flags |= WM_F_HAS_MII;
   10061 
   10062 	if (sc->sc_type == WM_T_80003)
   10063 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10064 	else
   10065 		sc->sc_tipg = TIPG_1000T_DFLT;
   10066 
   10067 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10068 	if ((sc->sc_type == WM_T_82580)
   10069 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10070 	    || (sc->sc_type == WM_T_I211)) {
   10071 		reg = CSR_READ(sc, WMREG_PHPM);
   10072 		reg &= ~PHPM_GO_LINK_D;
   10073 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10074 	}
   10075 
   10076 	/*
   10077 	 * Let the chip set speed/duplex on its own based on
   10078 	 * signals from the PHY.
   10079 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10080 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10081 	 */
   10082 	sc->sc_ctrl |= CTRL_SLU;
   10083 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10084 
   10085 	/* Initialize our media structures and probe the GMII. */
   10086 	mii->mii_ifp = ifp;
   10087 
   10088 	mii->mii_statchg = wm_gmii_statchg;
   10089 
   10090 	/* get PHY control from SMBus to PCIe */
   10091 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10092 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10093 	    || (sc->sc_type == WM_T_PCH_CNP))
   10094 		wm_init_phy_workarounds_pchlan(sc);
   10095 
   10096 	wm_gmii_reset(sc);
   10097 
   10098 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10099 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10100 	    wm_gmii_mediastatus);
   10101 
   10102 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10103 	    || (sc->sc_type == WM_T_82580)
   10104 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10105 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10106 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10107 			/* Attach only one port */
   10108 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10109 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10110 		} else {
   10111 			int i, id;
   10112 			uint32_t ctrl_ext;
   10113 
   10114 			id = wm_get_phy_id_82575(sc);
   10115 			if (id != -1) {
   10116 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10117 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10118 			}
   10119 			if ((id == -1)
   10120 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10121 				/* Power on sgmii phy if it is disabled */
   10122 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10123 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10124 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10125 				CSR_WRITE_FLUSH(sc);
   10126 				delay(300*1000); /* XXX too long */
   10127 
				/* Try PHY addresses 1 through 7 */
   10129 				for (i = 1; i < 8; i++)
   10130 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10131 					    0xffffffff, i, MII_OFFSET_ANY,
   10132 					    MIIF_DOPAUSE);
   10133 
   10134 				/* restore previous sfp cage power state */
   10135 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10136 			}
   10137 		}
   10138 	} else
   10139 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10140 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10141 
	/*
	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
	 */
   10146 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10147 		|| (sc->sc_type == WM_T_PCH_SPT)
   10148 		|| (sc->sc_type == WM_T_PCH_CNP))
   10149 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10150 		wm_set_mdio_slow_mode_hv(sc);
   10151 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10152 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10153 	}
   10154 
   10155 	/*
   10156 	 * (For ICH8 variants)
   10157 	 * If PHY detection failed, use BM's r/w function and retry.
   10158 	 */
   10159 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10160 		/* if failed, retry with *_bm_* */
   10161 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10162 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10163 		    sc->sc_phytype);
   10164 		sc->sc_phytype = WMPHY_BM;
   10165 		mii->mii_readreg = wm_gmii_bm_readreg;
   10166 		mii->mii_writereg = wm_gmii_bm_writereg;
   10167 
   10168 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10169 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10170 	}
   10171 
   10172 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10174 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10175 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10176 		sc->sc_phytype = WMPHY_NONE;
   10177 	} else {
   10178 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10179 
   10180 		/*
   10181 		 * PHY Found! Check PHY type again by the second call of
   10182 		 * wm_gmii_setup_phytype.
   10183 		 */
   10184 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10185 		    child->mii_mpd_model);
   10186 
   10187 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10188 	}
   10189 }
   10190 
   10191 /*
   10192  * wm_gmii_mediachange:	[ifmedia interface function]
   10193  *
   10194  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10195  */
   10196 static int
   10197 wm_gmii_mediachange(struct ifnet *ifp)
   10198 {
   10199 	struct wm_softc *sc = ifp->if_softc;
   10200 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10201 	int rc;
   10202 
   10203 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10204 		device_xname(sc->sc_dev), __func__));
   10205 	if ((ifp->if_flags & IFF_UP) == 0)
   10206 		return 0;
   10207 
   10208 	/* Disable D0 LPLU. */
   10209 	wm_lplu_d0_disable(sc);
   10210 
   10211 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10212 	sc->sc_ctrl |= CTRL_SLU;
   10213 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10214 	    || (sc->sc_type > WM_T_82543)) {
   10215 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10216 	} else {
   10217 		sc->sc_ctrl &= ~CTRL_ASDE;
   10218 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10219 		if (ife->ifm_media & IFM_FDX)
   10220 			sc->sc_ctrl |= CTRL_FD;
   10221 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10222 		case IFM_10_T:
   10223 			sc->sc_ctrl |= CTRL_SPEED_10;
   10224 			break;
   10225 		case IFM_100_TX:
   10226 			sc->sc_ctrl |= CTRL_SPEED_100;
   10227 			break;
   10228 		case IFM_1000_T:
   10229 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10230 			break;
   10231 		case IFM_NONE:
   10232 			/* There is no specific setting for IFM_NONE */
   10233 			break;
   10234 		default:
   10235 			panic("wm_gmii_mediachange: bad media 0x%x",
   10236 			    ife->ifm_media);
   10237 		}
   10238 	}
   10239 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10240 	CSR_WRITE_FLUSH(sc);
   10241 	if (sc->sc_type <= WM_T_82543)
   10242 		wm_gmii_reset(sc);
   10243 
   10244 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10245 		return 0;
   10246 	return rc;
   10247 }
   10248 
   10249 /*
   10250  * wm_gmii_mediastatus:	[ifmedia interface function]
   10251  *
   10252  *	Get the current interface media status on a 1000BASE-T device.
   10253  */
   10254 static void
   10255 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10256 {
   10257 	struct wm_softc *sc = ifp->if_softc;
   10258 
   10259 	ether_mediastatus(ifp, ifmr);
   10260 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10261 	    | sc->sc_flowflags;
   10262 }
   10263 
   10264 #define	MDI_IO		CTRL_SWDPIN(2)
   10265 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10266 #define	MDI_CLK		CTRL_SWDPIN(3)
   10267 
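/*
 * The routines below bit-bang IEEE 802.3 clause 22 MDIO frames through
 * the software-definable pins. Roughly (a sketch of standard clause 22
 * framing, not chip documentation), a management frame looks like:
 *
 *	<32 bits of preamble (all ones)>
 *	ST(01) OP(10=read, 01=write) PHYAD[4:0] REGAD[4:0] TA DATA[15:0]
 */
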
   10268 static void
   10269 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10270 {
   10271 	uint32_t i, v;
   10272 
   10273 	v = CSR_READ(sc, WMREG_CTRL);
   10274 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10275 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10276 
   10277 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10278 		if (data & i)
   10279 			v |= MDI_IO;
   10280 		else
   10281 			v &= ~MDI_IO;
   10282 		CSR_WRITE(sc, WMREG_CTRL, v);
   10283 		CSR_WRITE_FLUSH(sc);
   10284 		delay(10);
   10285 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10286 		CSR_WRITE_FLUSH(sc);
   10287 		delay(10);
   10288 		CSR_WRITE(sc, WMREG_CTRL, v);
   10289 		CSR_WRITE_FLUSH(sc);
   10290 		delay(10);
   10291 	}
   10292 }
   10293 
   10294 static uint16_t
   10295 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10296 {
   10297 	uint32_t v, i;
   10298 	uint16_t data = 0;
   10299 
   10300 	v = CSR_READ(sc, WMREG_CTRL);
   10301 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10302 	v |= CTRL_SWDPIO(3);
   10303 
   10304 	CSR_WRITE(sc, WMREG_CTRL, v);
   10305 	CSR_WRITE_FLUSH(sc);
   10306 	delay(10);
   10307 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10308 	CSR_WRITE_FLUSH(sc);
   10309 	delay(10);
   10310 	CSR_WRITE(sc, WMREG_CTRL, v);
   10311 	CSR_WRITE_FLUSH(sc);
   10312 	delay(10);
   10313 
   10314 	for (i = 0; i < 16; i++) {
   10315 		data <<= 1;
   10316 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10317 		CSR_WRITE_FLUSH(sc);
   10318 		delay(10);
   10319 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10320 			data |= 1;
   10321 		CSR_WRITE(sc, WMREG_CTRL, v);
   10322 		CSR_WRITE_FLUSH(sc);
   10323 		delay(10);
   10324 	}
   10325 
   10326 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10327 	CSR_WRITE_FLUSH(sc);
   10328 	delay(10);
   10329 	CSR_WRITE(sc, WMREG_CTRL, v);
   10330 	CSR_WRITE_FLUSH(sc);
   10331 	delay(10);
   10332 
   10333 	return data;
   10334 }
   10335 
   10336 #undef MDI_IO
   10337 #undef MDI_DIR
   10338 #undef MDI_CLK
   10339 
   10340 /*
   10341  * wm_gmii_i82543_readreg:	[mii interface function]
   10342  *
   10343  *	Read a PHY register on the GMII (i82543 version).
   10344  */
   10345 static int
   10346 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10347 {
   10348 	struct wm_softc *sc = device_private(dev);
   10349 
   10350 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10351 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10352 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10353 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10354 
   10355 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10356 		device_xname(dev), phy, reg, *val));
   10357 
   10358 	return 0;
   10359 }
   10360 
   10361 /*
   10362  * wm_gmii_i82543_writereg:	[mii interface function]
   10363  *
   10364  *	Write a PHY register on the GMII (i82543 version).
   10365  */
   10366 static int
   10367 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10368 {
   10369 	struct wm_softc *sc = device_private(dev);
   10370 
   10371 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10372 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10373 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10374 	    (MII_COMMAND_START << 30), 32);
   10375 
   10376 	return 0;
   10377 }
   10378 
   10379 /*
   10380  * wm_gmii_mdic_readreg:	[mii interface function]
   10381  *
   10382  *	Read a PHY register on the GMII.
   10383  */
   10384 static int
   10385 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10386 {
   10387 	struct wm_softc *sc = device_private(dev);
   10388 	uint32_t mdic = 0;
   10389 	int i;
   10390 
   10391 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10392 	    && (reg > MII_ADDRMASK)) {
   10393 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10394 		    __func__, sc->sc_phytype, reg);
   10395 		reg &= MII_ADDRMASK;
   10396 	}
   10397 
   10398 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10399 	    MDIC_REGADD(reg));
   10400 
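	/*
	 * Poll for completion: the hardware sets MDIC_READY when the MDI
	 * cycle finishes and MDIC_E if the cycle ended in an error (up to
	 * WM_GEN_POLL_TIMEOUT * 3 polls of 50us each).
	 */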
   10401 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10402 		delay(50);
   10403 		mdic = CSR_READ(sc, WMREG_MDIC);
   10404 		if (mdic & MDIC_READY)
   10405 			break;
   10406 	}
   10407 
   10408 	if ((mdic & MDIC_READY) == 0) {
   10409 		DPRINTF(WM_DEBUG_GMII,
   10410 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10411 			device_xname(dev), phy, reg));
   10412 		return ETIMEDOUT;
   10413 	} else if (mdic & MDIC_E) {
   10414 		/* This is normal if no PHY is present. */
   10415 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10416 			device_xname(sc->sc_dev), phy, reg));
   10417 		return -1;
   10418 	} else
   10419 		*val = MDIC_DATA(mdic);
   10420 
   10421 	/*
   10422 	 * Allow some time after each MDIC transaction to avoid
   10423 	 * reading duplicate data in the next MDIC transaction.
   10424 	 */
   10425 	if (sc->sc_type == WM_T_PCH2)
   10426 		delay(100);
   10427 
   10428 	return 0;
   10429 }
   10430 
   10431 /*
   10432  * wm_gmii_mdic_writereg:	[mii interface function]
   10433  *
   10434  *	Write a PHY register on the GMII.
   10435  */
   10436 static int
   10437 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10438 {
   10439 	struct wm_softc *sc = device_private(dev);
   10440 	uint32_t mdic = 0;
   10441 	int i;
   10442 
   10443 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10444 	    && (reg > MII_ADDRMASK)) {
   10445 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10446 		    __func__, sc->sc_phytype, reg);
   10447 		reg &= MII_ADDRMASK;
   10448 	}
   10449 
   10450 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10451 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10452 
   10453 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10454 		delay(50);
   10455 		mdic = CSR_READ(sc, WMREG_MDIC);
   10456 		if (mdic & MDIC_READY)
   10457 			break;
   10458 	}
   10459 
   10460 	if ((mdic & MDIC_READY) == 0) {
   10461 		DPRINTF(WM_DEBUG_GMII,
   10462 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10463 			device_xname(dev), phy, reg));
   10464 		return ETIMEDOUT;
   10465 	} else if (mdic & MDIC_E) {
   10466 		DPRINTF(WM_DEBUG_GMII,
   10467 		    ("%s: MDIC write error: phy %d reg %d\n",
   10468 			device_xname(dev), phy, reg));
   10469 		return -1;
   10470 	}
   10471 
   10472 	/*
   10473 	 * Allow some time after each MDIC transaction to avoid
   10474 	 * reading duplicate data in the next MDIC transaction.
   10475 	 */
   10476 	if (sc->sc_type == WM_T_PCH2)
   10477 		delay(100);
   10478 
   10479 	return 0;
   10480 }
   10481 
   10482 /*
   10483  * wm_gmii_i82544_readreg:	[mii interface function]
   10484  *
   10485  *	Read a PHY register on the GMII.
   10486  */
   10487 static int
   10488 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10489 {
   10490 	struct wm_softc *sc = device_private(dev);
   10491 	int rv;
   10492 
   10493 	if (sc->phy.acquire(sc)) {
   10494 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10495 		return -1;
   10496 	}
   10497 
   10498 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10499 
   10500 	sc->phy.release(sc);
   10501 
   10502 	return rv;
   10503 }
   10504 
   10505 static int
   10506 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10507 {
   10508 	struct wm_softc *sc = device_private(dev);
   10509 
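	/*
	 * IGP PHYs have more registers than fit in the 5-bit MII address
	 * space: registers above BME1000_MAX_MULTI_PAGE_REG are reached by
	 * first writing the full register number to MII_IGPHY_PAGE_SELECT
	 * and then addressing the low 5 bits (reg & MII_ADDRMASK) normally.
	 */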
   10510 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10511 		switch (sc->sc_phytype) {
   10512 		case WMPHY_IGP:
   10513 		case WMPHY_IGP_2:
   10514 		case WMPHY_IGP_3:
   10515 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10516 			    reg);
   10517 			break;
   10518 		default:
   10519 #ifdef WM_DEBUG
   10520 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10521 			    __func__, sc->sc_phytype, reg);
   10522 #endif
   10523 			break;
   10524 		}
   10525 	}
   10526 
   10527 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10528 
   10529 	return 0;
   10530 }
   10531 
   10532 /*
   10533  * wm_gmii_i82544_writereg:	[mii interface function]
   10534  *
   10535  *	Write a PHY register on the GMII.
   10536  */
   10537 static int
   10538 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10539 {
   10540 	struct wm_softc *sc = device_private(dev);
   10541 	int rv;
   10542 
   10543 	if (sc->phy.acquire(sc)) {
   10544 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10545 		return -1;
   10546 	}
   10547 
   10548 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10549 	sc->phy.release(sc);
   10550 
   10551 	return rv;
   10552 }
   10553 
   10554 static int
   10555 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10556 {
   10557 	struct wm_softc *sc = device_private(dev);
   10558 
   10559 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10560 		switch (sc->sc_phytype) {
   10561 		case WMPHY_IGP:
   10562 		case WMPHY_IGP_2:
   10563 		case WMPHY_IGP_3:
   10564 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10565 			    reg);
   10566 			break;
   10567 		default:
   10568 #ifdef WM_DEBUG
   10569 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10570 			    __func__, sc->sc_phytype, reg);
   10571 #endif
   10572 			break;
   10573 		}
   10574 	}
   10575 
   10576 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10577 
   10578 	return 0;
   10579 }
   10580 
   10581 /*
   10582  * wm_gmii_i80003_readreg:	[mii interface function]
   10583  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10587  */
   10588 static int
   10589 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10590 {
   10591 	struct wm_softc *sc = device_private(dev);
   10592 	int page_select;
   10593 	uint16_t temp, temp2;
   10594 	int rv = 0;
   10595 
   10596 	if (phy != 1) /* only one PHY on kumeran bus */
   10597 		return -1;
   10598 
   10599 	if (sc->phy.acquire(sc)) {
   10600 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10601 		return -1;
   10602 	}
   10603 
   10604 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10605 		page_select = GG82563_PHY_PAGE_SELECT;
   10606 	else {
   10607 		/*
   10608 		 * Use Alternative Page Select register to access registers
   10609 		 * 30 and 31.
   10610 		 */
   10611 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10612 	}
   10613 	temp = reg >> GG82563_PAGE_SHIFT;
   10614 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10615 		goto out;
   10616 
   10617 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10618 		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10621 		 */
   10622 		delay(200);
   10623 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10624 		if (temp2 != temp) {
   10625 			device_printf(dev, "%s failed\n", __func__);
   10626 			rv = -1;
   10627 			goto out;
   10628 		}
   10629 		delay(200);
   10630 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10631 		delay(200);
   10632 	} else
   10633 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10634 
   10635 out:
   10636 	sc->phy.release(sc);
   10637 	return rv;
   10638 }
   10639 
   10640 /*
   10641  * wm_gmii_i80003_writereg:	[mii interface function]
   10642  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10646  */
   10647 static int
   10648 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10649 {
   10650 	struct wm_softc *sc = device_private(dev);
   10651 	int page_select, rv;
   10652 	uint16_t temp, temp2;
   10653 
   10654 	if (phy != 1) /* only one PHY on kumeran bus */
   10655 		return -1;
   10656 
   10657 	if (sc->phy.acquire(sc)) {
   10658 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10659 		return -1;
   10660 	}
   10661 
   10662 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10663 		page_select = GG82563_PHY_PAGE_SELECT;
   10664 	else {
   10665 		/*
   10666 		 * Use Alternative Page Select register to access registers
   10667 		 * 30 and 31.
   10668 		 */
   10669 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10670 	}
   10671 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10672 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10673 		goto out;
   10674 
   10675 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10676 		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10679 		 */
   10680 		delay(200);
   10681 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10682 		if (temp2 != temp) {
   10683 			device_printf(dev, "%s failed\n", __func__);
   10684 			rv = -1;
   10685 			goto out;
   10686 		}
   10687 		delay(200);
   10688 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10689 		delay(200);
   10690 	} else
   10691 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10692 
   10693 out:
   10694 	sc->phy.release(sc);
   10695 	return rv;
   10696 }
   10697 
   10698 /*
   10699  * wm_gmii_bm_readreg:	[mii interface function]
   10700  *
 *	Read a PHY register on the BM PHYs (82567, 82574 and 82583).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10704  */
   10705 static int
   10706 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10707 {
   10708 	struct wm_softc *sc = device_private(dev);
   10709 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10710 	int rv;
   10711 
   10712 	if (sc->phy.acquire(sc)) {
   10713 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10714 		return -1;
   10715 	}
   10716 
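	/*
	 * On BM PHYs other than the 82574/82583's, registers on pages >=
	 * 768, register 25 on page 0 and register 31 are only reachable at
	 * PHY address 1, so redirect the access there.
	 */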
   10717 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10718 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10719 		    || (reg == 31)) ? 1 : phy;
   10720 	/* Page 800 works differently than the rest so it has its own func */
   10721 	if (page == BM_WUC_PAGE) {
   10722 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10723 		goto release;
   10724 	}
   10725 
   10726 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10727 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10728 		    && (sc->sc_type != WM_T_82583))
   10729 			rv = wm_gmii_mdic_writereg(dev, phy,
   10730 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10731 		else
   10732 			rv = wm_gmii_mdic_writereg(dev, phy,
   10733 			    BME1000_PHY_PAGE_SELECT, page);
   10734 		if (rv != 0)
   10735 			goto release;
   10736 	}
   10737 
   10738 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10739 
   10740 release:
   10741 	sc->phy.release(sc);
   10742 	return rv;
   10743 }
   10744 
   10745 /*
   10746  * wm_gmii_bm_writereg:	[mii interface function]
   10747  *
 *	Write a PHY register on the BM PHYs (82567, 82574 and 82583).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10751  */
   10752 static int
   10753 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10754 {
   10755 	struct wm_softc *sc = device_private(dev);
   10756 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10757 	int rv;
   10758 
   10759 	if (sc->phy.acquire(sc)) {
   10760 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10761 		return -1;
   10762 	}
   10763 
   10764 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10765 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10766 		    || (reg == 31)) ? 1 : phy;
   10767 	/* Page 800 works differently than the rest so it has its own func */
   10768 	if (page == BM_WUC_PAGE) {
   10769 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10770 		goto release;
   10771 	}
   10772 
   10773 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10774 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10775 		    && (sc->sc_type != WM_T_82583))
   10776 			rv = wm_gmii_mdic_writereg(dev, phy,
   10777 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10778 		else
   10779 			rv = wm_gmii_mdic_writereg(dev, phy,
   10780 			    BME1000_PHY_PAGE_SELECT, page);
   10781 		if (rv != 0)
   10782 			goto release;
   10783 	}
   10784 
   10785 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10786 
   10787 release:
   10788 	sc->phy.release(sc);
   10789 	return rv;
   10790 }
   10791 
   10792 /*
   10793  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10794  *  @dev: pointer to the HW structure
   10795  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10796  *
   10797  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10798  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10799  */
   10800 static int
   10801 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10802 {
   10803 	uint16_t temp;
   10804 	int rv;
   10805 
   10806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10807 		device_xname(dev), __func__));
   10808 
   10809 	if (!phy_regp)
   10810 		return -1;
   10811 
   10812 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10813 
   10814 	/* Select Port Control Registers page */
   10815 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10816 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10817 	if (rv != 0)
   10818 		return rv;
   10819 
   10820 	/* Read WUCE and save it */
   10821 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10822 	if (rv != 0)
   10823 		return rv;
   10824 
   10825 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10826 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10827 	 */
   10828 	temp = *phy_regp;
   10829 	temp |= BM_WUC_ENABLE_BIT;
   10830 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10831 
   10832 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10833 		return rv;
   10834 
   10835 	/* Select Host Wakeup Registers page - caller now able to write
   10836 	 * registers on the Wakeup registers page
   10837 	 */
   10838 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10839 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10840 }
   10841 
   10842 /*
   10843  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10844  *  @dev: pointer to the HW structure
   10845  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10846  *
   10847  *  Restore BM_WUC_ENABLE_REG to its original value.
   10848  *
   10849  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10850  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10851  *  caller.
   10852  */
   10853 static int
   10854 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10855 {
   10856 
   10857 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10858 		device_xname(dev), __func__));
   10859 
   10860 	if (!phy_regp)
   10861 		return -1;
   10862 
   10863 	/* Select Port Control Registers page */
   10864 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10865 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10866 
   10867 	/* Restore 769.17 to its original value */
   10868 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10869 
   10870 	return 0;
   10871 }
   10872 
   10873 /*
   10874  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10875  *  @sc: pointer to the HW structure
   10876  *  @offset: register offset to be read or written
   10877  *  @val: pointer to the data to read or write
   10878  *  @rd: determines if operation is read or write
   10879  *  @page_set: BM_WUC_PAGE already set and access enabled
   10880  *
   10881  *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to the PHY register at offset.  Note the procedure
 *  to access the PHY wakeup registers is different from that for the other
 *  PHY registers. It works as follows:
   10885  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   10886  *  2) Set page to 800 for host (801 if we were manageability)
   10887  *  3) Write the address using the address opcode (0x11)
   10888  *  4) Read or write the data using the data opcode (0x12)
   10889  *  5) Restore 769.17.2 to its original value
   10890  *
   10891  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   10892  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   10893  *
   10894  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   10895  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   10897  */
   10898 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   10900 	bool page_set)
   10901 {
   10902 	struct wm_softc *sc = device_private(dev);
   10903 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10904 	uint16_t page = BM_PHY_REG_PAGE(offset);
   10905 	uint16_t wuce;
   10906 	int rv = 0;
   10907 
   10908 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10909 		device_xname(dev), __func__));
   10910 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10911 	if ((sc->sc_type == WM_T_PCH)
   10912 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   10913 		device_printf(dev,
   10914 		    "Attempting to access page %d while gig enabled.\n", page);
   10915 	}
   10916 
   10917 	if (!page_set) {
   10918 		/* Enable access to PHY wakeup registers */
   10919 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   10920 		if (rv != 0) {
   10921 			device_printf(dev,
   10922 			    "%s: Could not enable PHY wakeup reg access\n",
   10923 			    __func__);
   10924 			return rv;
   10925 		}
   10926 	}
   10927 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   10928 		device_xname(sc->sc_dev), __func__, page, regnum));
   10929 
	/*
	 * 3) and 4) Access the PHY wakeup register using the address and
	 * data opcodes; see the procedure in the function comment above.
	 */
   10934 
   10935 	/* Write the Wakeup register page offset value using opcode 0x11 */
   10936 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10937 	if (rv != 0)
   10938 		return rv;
   10939 
   10940 	if (rd) {
   10941 		/* Read the Wakeup register page value using opcode 0x12 */
   10942 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   10943 	} else {
   10944 		/* Write the Wakeup register page value using opcode 0x12 */
   10945 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10946 	}
   10947 	if (rv != 0)
   10948 		return rv;
   10949 
   10950 	if (!page_set)
   10951 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   10952 
   10953 	return rv;
   10954 }
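
/*
 * Example (a hedged sketch, not a call site from this driver): reading a
 * single wakeup-page register through the helper above, where BM_WUC is a
 * hypothetical offset encoded on BM_WUC_PAGE and the PHY semaphore is
 * already held:
 *
 *	uint16_t v;
 *	int rv = wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &v, true, false);
 */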
   10955 
   10956 /*
   10957  * wm_gmii_hv_readreg:	[mii interface function]
   10958  *
 *	Read a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10962  */
   10963 static int
   10964 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10965 {
   10966 	struct wm_softc *sc = device_private(dev);
   10967 	int rv;
   10968 
   10969 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10970 		device_xname(dev), __func__));
   10971 	if (sc->phy.acquire(sc)) {
   10972 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10973 		return -1;
   10974 	}
   10975 
   10976 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   10977 	sc->phy.release(sc);
   10978 	return rv;
   10979 }
   10980 
   10981 static int
   10982 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10983 {
   10984 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10985 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10986 	int rv;
   10987 
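	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START are only
	 * reachable at PHY address 1, so redirect the access there.
	 */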
   10988 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10989 
   10990 	/* Page 800 works differently than the rest so it has its own func */
   10991 	if (page == BM_WUC_PAGE)
   10992 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10993 
	/*
	 * Pages between 1 and 767 are accessed differently from the rest
	 * and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11002 
   11003 	/*
   11004 	 * XXX I21[789] documents say that the SMBus Address register is at
   11005 	 * PHY address 01, Page 0 (not 768), Register 26.
   11006 	 */
   11007 	if (page == HV_INTC_FC_PAGE_START)
   11008 		page = 0;
   11009 
   11010 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11011 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11012 		    page << BME1000_PAGE_SHIFT);
   11013 		if (rv != 0)
   11014 			return rv;
   11015 	}
   11016 
   11017 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11018 }
   11019 
   11020 /*
   11021  * wm_gmii_hv_writereg:	[mii interface function]
   11022  *
 *	Write a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11026  */
   11027 static int
   11028 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11029 {
   11030 	struct wm_softc *sc = device_private(dev);
   11031 	int rv;
   11032 
   11033 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11034 		device_xname(dev), __func__));
   11035 
   11036 	if (sc->phy.acquire(sc)) {
   11037 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11038 		return -1;
   11039 	}
   11040 
   11041 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11042 	sc->phy.release(sc);
   11043 
   11044 	return rv;
   11045 }
   11046 
   11047 static int
   11048 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11049 {
   11050 	struct wm_softc *sc = device_private(dev);
   11051 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11052 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11053 	int rv;
   11054 
   11055 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11056 
   11057 	/* Page 800 works differently than the rest so it has its own func */
   11058 	if (page == BM_WUC_PAGE)
   11059 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11060 		    false);
   11061 
	/*
	 * Pages between 1 and 767 are accessed differently from the rest
	 * and are not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11070 
   11071 	{
   11072 		/*
   11073 		 * XXX I21[789] documents say that the SMBus Address register
   11074 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11075 		 */
   11076 		if (page == HV_INTC_FC_PAGE_START)
   11077 			page = 0;
   11078 
   11079 		/*
   11080 		 * XXX Workaround MDIO accesses being disabled after entering
   11081 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11082 		 * register is set)
   11083 		 */
   11084 		if (sc->sc_phytype == WMPHY_82578) {
   11085 			struct mii_softc *child;
   11086 
   11087 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11088 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11089 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11090 			    && ((val & (1 << 11)) != 0)) {
				device_printf(dev, "XXX need workaround\n");
   11092 			}
   11093 		}
   11094 
   11095 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11096 			rv = wm_gmii_mdic_writereg(dev, 1,
   11097 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11098 			if (rv != 0)
   11099 				return rv;
   11100 		}
   11101 	}
   11102 
   11103 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11104 }
   11105 
   11106 /*
   11107  * wm_gmii_82580_readreg:	[mii interface function]
   11108  *
   11109  *	Read a PHY register on the 82580 and I350.
   11110  * This could be handled by the PHY layer if we didn't have to lock the
   11111  * ressource ...
 * resource ...
   11113 static int
   11114 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11115 {
   11116 	struct wm_softc *sc = device_private(dev);
   11117 	int rv;
   11118 
   11119 	if (sc->phy.acquire(sc) != 0) {
   11120 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11121 		return -1;
   11122 	}
   11123 
   11124 #ifdef DIAGNOSTIC
   11125 	if (reg > MII_ADDRMASK) {
   11126 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11127 		    __func__, sc->sc_phytype, reg);
   11128 		reg &= MII_ADDRMASK;
   11129 	}
   11130 #endif
   11131 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11132 
   11133 	sc->phy.release(sc);
   11134 	return rv;
   11135 }
   11136 
   11137 /*
   11138  * wm_gmii_82580_writereg:	[mii interface function]
   11139  *
   11140  *	Write a PHY register on the 82580 and I350.
   11141  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11143  */
   11144 static int
   11145 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11146 {
   11147 	struct wm_softc *sc = device_private(dev);
   11148 	int rv;
   11149 
   11150 	if (sc->phy.acquire(sc) != 0) {
   11151 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11152 		return -1;
   11153 	}
   11154 
   11155 #ifdef DIAGNOSTIC
   11156 	if (reg > MII_ADDRMASK) {
   11157 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11158 		    __func__, sc->sc_phytype, reg);
   11159 		reg &= MII_ADDRMASK;
   11160 	}
   11161 #endif
   11162 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11163 
   11164 	sc->phy.release(sc);
   11165 	return rv;
   11166 }
   11167 
   11168 /*
   11169  * wm_gmii_gs40g_readreg:	[mii interface function]
   11170  *
   11171  *	Read a PHY register on the I2100 and I211.
   11172  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11174  */
   11175 static int
   11176 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11177 {
   11178 	struct wm_softc *sc = device_private(dev);
   11179 	int page, offset;
   11180 	int rv;
   11181 
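	/*
	 * The GS40G "reg" argument encodes both the page (in the upper
	 * bits) and the register offset within that page (in the lower
	 * bits); split it below before issuing the MDIC accesses.
	 */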
   11182 	/* Acquire semaphore */
   11183 	if (sc->phy.acquire(sc)) {
   11184 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11185 		return -1;
   11186 	}
   11187 
   11188 	/* Page select */
   11189 	page = reg >> GS40G_PAGE_SHIFT;
   11190 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11191 	if (rv != 0)
   11192 		goto release;
   11193 
   11194 	/* Read reg */
   11195 	offset = reg & GS40G_OFFSET_MASK;
   11196 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11197 
   11198 release:
   11199 	sc->phy.release(sc);
   11200 	return rv;
   11201 }
   11202 
   11203 /*
   11204  * wm_gmii_gs40g_writereg:	[mii interface function]
   11205  *
   11206  *	Write a PHY register on the I210 and I211.
   11207  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11209  */
   11210 static int
   11211 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11212 {
   11213 	struct wm_softc *sc = device_private(dev);
   11214 	uint16_t page;
   11215 	int offset, rv;
   11216 
   11217 	/* Acquire semaphore */
   11218 	if (sc->phy.acquire(sc)) {
   11219 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11220 		return -1;
   11221 	}
   11222 
   11223 	/* Page select */
   11224 	page = reg >> GS40G_PAGE_SHIFT;
   11225 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11226 	if (rv != 0)
   11227 		goto release;
   11228 
   11229 	/* Write reg */
   11230 	offset = reg & GS40G_OFFSET_MASK;
   11231 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11232 
   11233 release:
   11234 	/* Release semaphore */
   11235 	sc->phy.release(sc);
   11236 	return rv;
   11237 }
   11238 
   11239 /*
   11240  * wm_gmii_statchg:	[mii interface function]
   11241  *
   11242  *	Callback from MII layer when media changes.
   11243  */
   11244 static void
   11245 wm_gmii_statchg(struct ifnet *ifp)
   11246 {
   11247 	struct wm_softc *sc = ifp->if_softc;
   11248 	struct mii_data *mii = &sc->sc_mii;
   11249 
   11250 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11251 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11252 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11253 
   11254 	/*
   11255 	 * Get flow control negotiation result.
   11256 	 */
   11257 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11258 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11259 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11260 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11261 	}
   11262 
   11263 	if (sc->sc_flowflags & IFM_FLOW) {
   11264 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11265 			sc->sc_ctrl |= CTRL_TFCE;
   11266 			sc->sc_fcrtl |= FCRTL_XONE;
   11267 		}
   11268 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11269 			sc->sc_ctrl |= CTRL_RFCE;
   11270 	}
   11271 
   11272 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11273 		DPRINTF(WM_DEBUG_LINK,
   11274 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11275 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11276 	} else {
   11277 		DPRINTF(WM_DEBUG_LINK,
   11278 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11279 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11280 	}
   11281 
   11282 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11283 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11284 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11285 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11286 	if (sc->sc_type == WM_T_80003) {
   11287 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11288 		case IFM_1000_T:
   11289 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11290 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11292 			break;
   11293 		default:
   11294 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11295 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11297 			break;
   11298 		}
   11299 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11300 	}
   11301 }
   11302 
   11303 /* kumeran related (80003, ICH* and PCH*) */
   11304 
   11305 /*
   11306  * wm_kmrn_readreg:
   11307  *
   11308  *	Read a kumeran register
   11309  */
   11310 static int
   11311 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11312 {
   11313 	int rv;
   11314 
   11315 	if (sc->sc_type == WM_T_80003)
   11316 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11317 	else
   11318 		rv = sc->phy.acquire(sc);
   11319 	if (rv != 0) {
   11320 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11321 		    __func__);
   11322 		return rv;
   11323 	}
   11324 
   11325 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11326 
   11327 	if (sc->sc_type == WM_T_80003)
   11328 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11329 	else
   11330 		sc->phy.release(sc);
   11331 
   11332 	return rv;
   11333 }
   11334 
   11335 static int
   11336 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11337 {
   11338 
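	/*
	 * Kumeran registers are reached indirectly through KUMCTRLSTA:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait briefly, then fetch the data from the low 16 bits.
	 */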
   11339 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11340 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11341 	    KUMCTRLSTA_REN);
   11342 	CSR_WRITE_FLUSH(sc);
   11343 	delay(2);
   11344 
   11345 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11346 
   11347 	return 0;
   11348 }
   11349 
   11350 /*
   11351  * wm_kmrn_writereg:
   11352  *
   11353  *	Write a kumeran register
   11354  */
   11355 static int
   11356 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11357 {
   11358 	int rv;
   11359 
   11360 	if (sc->sc_type == WM_T_80003)
   11361 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11362 	else
   11363 		rv = sc->phy.acquire(sc);
   11364 	if (rv != 0) {
   11365 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11366 		    __func__);
   11367 		return rv;
   11368 	}
   11369 
   11370 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11371 
   11372 	if (sc->sc_type == WM_T_80003)
   11373 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11374 	else
   11375 		sc->phy.release(sc);
   11376 
   11377 	return rv;
   11378 }
   11379 
   11380 static int
   11381 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11382 {
   11383 
   11384 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11385 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11386 
   11387 	return 0;
   11388 }
   11389 
   11390 /*
   11391  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11392  * This access method is different from IEEE MMD.
   11393  */
   11394 static int
   11395 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11396 {
   11397 	struct wm_softc *sc = device_private(dev);
   11398 	int rv;
   11399 
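	/*
	 * EMI registers are accessed indirectly: write the target
	 * register number to I82579_EMI_ADDR, then read or write the
	 * value through I82579_EMI_DATA on the same PHY (address 2).
	 */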
   11400 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11401 	if (rv != 0)
   11402 		return rv;
   11403 
   11404 	if (rd)
   11405 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11406 	else
   11407 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11408 	return rv;
   11409 }
   11410 
   11411 static int
   11412 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11413 {
   11414 
   11415 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11416 }
   11417 
   11418 static int
   11419 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11420 {
   11421 
   11422 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11423 }
   11424 
   11425 /* SGMII related */
   11426 
   11427 /*
   11428  * wm_sgmii_uses_mdio
   11429  *
   11430  * Check whether the transaction is to the internal PHY or the external
   11431  * MDIO interface. Return true if it's MDIO.
   11432  */
   11433 static bool
   11434 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11435 {
   11436 	uint32_t reg;
   11437 	bool ismdio = false;
   11438 
   11439 	switch (sc->sc_type) {
   11440 	case WM_T_82575:
   11441 	case WM_T_82576:
   11442 		reg = CSR_READ(sc, WMREG_MDIC);
   11443 		ismdio = ((reg & MDIC_DEST) != 0);
   11444 		break;
   11445 	case WM_T_82580:
   11446 	case WM_T_I350:
   11447 	case WM_T_I354:
   11448 	case WM_T_I210:
   11449 	case WM_T_I211:
   11450 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11451 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11452 		break;
   11453 	default:
   11454 		break;
   11455 	}
   11456 
   11457 	return ismdio;
   11458 }
   11459 
   11460 /*
   11461  * wm_sgmii_readreg:	[mii interface function]
   11462  *
   11463  *	Read a PHY register on the SGMII
   11464  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11466  */
   11467 static int
   11468 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11469 {
   11470 	struct wm_softc *sc = device_private(dev);
   11471 	int rv;
   11472 
   11473 	if (sc->phy.acquire(sc)) {
   11474 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11475 		return -1;
   11476 	}
   11477 
   11478 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11479 
   11480 	sc->phy.release(sc);
   11481 	return rv;
   11482 }
   11483 
   11484 static int
   11485 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11486 {
   11487 	struct wm_softc *sc = device_private(dev);
   11488 	uint32_t i2ccmd;
	int i, rv = 0;
   11490 
   11491 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11492 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11493 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11494 
   11495 	/* Poll the ready bit */
   11496 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11497 		delay(50);
   11498 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11499 		if (i2ccmd & I2CCMD_READY)
   11500 			break;
   11501 	}
   11502 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11503 		device_printf(dev, "I2CCMD Read did not complete\n");
   11504 		rv = ETIMEDOUT;
   11505 	}
   11506 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11507 		device_printf(dev, "I2CCMD Error bit set\n");
   11508 		rv = EIO;
   11509 	}
   11510 
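	/*
	 * The two data bytes arrive byte-swapped from the I2C interface
	 * (see the matching swap in wm_sgmii_writereg_locked()), so swap
	 * them back before handing the value to the caller.
	 */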
   11511 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11512 
   11513 	return rv;
   11514 }
   11515 
   11516 /*
   11517  * wm_sgmii_writereg:	[mii interface function]
   11518  *
   11519  *	Write a PHY register on the SGMII.
   11520  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11522  */
   11523 static int
   11524 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11525 {
   11526 	struct wm_softc *sc = device_private(dev);
   11527 	int rv;
   11528 
   11529 	if (sc->phy.acquire(sc) != 0) {
   11530 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11531 		return -1;
   11532 	}
   11533 
   11534 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11535 
   11536 	sc->phy.release(sc);
   11537 
   11538 	return rv;
   11539 }
   11540 
   11541 static int
   11542 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11543 {
   11544 	struct wm_softc *sc = device_private(dev);
   11545 	uint32_t i2ccmd;
   11546 	uint16_t swapdata;
   11547 	int rv = 0;
   11548 	int i;
   11549 
   11550 	/* Swap the data bytes for the I2C interface */
   11551 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11552 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11553 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11554 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11555 
   11556 	/* Poll the ready bit */
   11557 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11558 		delay(50);
   11559 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11560 		if (i2ccmd & I2CCMD_READY)
   11561 			break;
   11562 	}
   11563 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11564 		device_printf(dev, "I2CCMD Write did not complete\n");
   11565 		rv = ETIMEDOUT;
   11566 	}
   11567 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11568 		device_printf(dev, "I2CCMD Error bit set\n");
   11569 		rv = EIO;
   11570 	}
   11571 
   11572 	return rv;
   11573 }
   11574 
   11575 /* TBI related */
   11576 
   11577 static bool
   11578 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11579 {
   11580 	bool sig;
   11581 
   11582 	sig = ctrl & CTRL_SWDPIN(1);
   11583 
   11584 	/*
   11585 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11586 	 * detect a signal, 1 if they don't.
   11587 	 */
   11588 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11589 		sig = !sig;
   11590 
   11591 	return sig;
   11592 }
   11593 
   11594 /*
   11595  * wm_tbi_mediainit:
   11596  *
   11597  *	Initialize media for use on 1000BASE-X devices.
   11598  */
   11599 static void
   11600 wm_tbi_mediainit(struct wm_softc *sc)
   11601 {
   11602 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11603 	const char *sep = "";
   11604 
   11605 	if (sc->sc_type < WM_T_82543)
   11606 		sc->sc_tipg = TIPG_WM_DFLT;
   11607 	else
   11608 		sc->sc_tipg = TIPG_LG_DFLT;
   11609 
   11610 	sc->sc_tbi_serdes_anegticks = 5;
   11611 
   11612 	/* Initialize our media structures */
   11613 	sc->sc_mii.mii_ifp = ifp;
   11614 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11615 
   11616 	if ((sc->sc_type >= WM_T_82575)
   11617 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11618 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11619 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11620 	else
   11621 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11622 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11623 
   11624 	/*
   11625 	 * SWD Pins:
   11626 	 *
   11627 	 *	0 = Link LED (output)
   11628 	 *	1 = Loss Of Signal (input)
   11629 	 */
   11630 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11631 
   11632 	/* XXX Perhaps this is only for TBI */
   11633 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11634 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11635 
   11636 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11637 		sc->sc_ctrl &= ~CTRL_LRST;
   11638 
   11639 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11640 
   11641 #define	ADD(ss, mm, dd)							\
   11642 do {									\
   11643 	aprint_normal("%s%s", sep, ss);					\
   11644 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11645 	sep = ", ";							\
   11646 } while (/*CONSTCOND*/0)
   11647 
   11648 	aprint_normal_dev(sc->sc_dev, "");
   11649 
   11650 	if (sc->sc_type == WM_T_I354) {
   11651 		uint32_t status;
   11652 
   11653 		status = CSR_READ(sc, WMREG_STATUS);
   11654 		if (((status & STATUS_2P5_SKU) != 0)
   11655 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11656 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11657 		} else
   11658 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11659 	} else if (sc->sc_type == WM_T_82545) {
   11660 		/* Only 82545 is LX (XXX except SFP) */
   11661 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11662 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11663 	} else {
   11664 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11665 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11666 	}
   11667 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11668 	aprint_normal("\n");
   11669 
   11670 #undef ADD
   11671 
   11672 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11673 }
   11674 
   11675 /*
   11676  * wm_tbi_mediachange:	[ifmedia interface function]
   11677  *
   11678  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11679  */
   11680 static int
   11681 wm_tbi_mediachange(struct ifnet *ifp)
   11682 {
   11683 	struct wm_softc *sc = ifp->if_softc;
   11684 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11685 	uint32_t status, ctrl;
   11686 	bool signal;
   11687 	int i;
   11688 
   11689 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11690 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11691 		/* XXX need some work for >= 82571 and < 82575 */
   11692 		if (sc->sc_type < WM_T_82575)
   11693 			return 0;
   11694 	}
   11695 
   11696 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11697 	    || (sc->sc_type >= WM_T_82575))
   11698 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11699 
   11700 	sc->sc_ctrl &= ~CTRL_LRST;
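	/*
	 * TXCW holds the 1000BASE-X autonegotiation code word: advertise
	 * our duplex abilities here and, below, our pause abilities.
	 */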
   11701 	sc->sc_txcw = TXCW_ANE;
   11702 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11703 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11704 	else if (ife->ifm_media & IFM_FDX)
   11705 		sc->sc_txcw |= TXCW_FD;
   11706 	else
   11707 		sc->sc_txcw |= TXCW_HD;
   11708 
   11709 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11710 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11711 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11713 		device_xname(sc->sc_dev), sc->sc_txcw));
   11714 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11716 	CSR_WRITE_FLUSH(sc);
   11717 	delay(1000);
   11718 
	ctrl = CSR_READ(sc, WMREG_CTRL);
   11720 	signal = wm_tbi_havesignal(sc, ctrl);
   11721 
   11722 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11723 		signal));
   11724 
   11725 	if (signal) {
   11726 		/* Have signal; wait for the link to come up. */
   11727 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11728 			delay(10000);
   11729 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11730 				break;
   11731 		}
   11732 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   11735 
   11736 		status = CSR_READ(sc, WMREG_STATUS);
   11737 		DPRINTF(WM_DEBUG_LINK,
   11738 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
   11740 		if (status & STATUS_LU) {
   11741 			/* Link is up. */
   11742 			DPRINTF(WM_DEBUG_LINK,
   11743 			    ("%s: LINK: set media -> link up %s\n",
   11744 				device_xname(sc->sc_dev),
   11745 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11746 
			/*
			 * NOTE: The hardware updates CTRL's TFCE and RFCE
			 * automatically, so re-read CTRL into sc->sc_ctrl.
			 */
   11751 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11752 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11753 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11754 			if (status & STATUS_FD)
   11755 				sc->sc_tctl |=
   11756 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11757 			else
   11758 				sc->sc_tctl |=
   11759 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11760 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11761 				sc->sc_fcrtl |= FCRTL_XONE;
   11762 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11763 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11764 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11765 			sc->sc_tbi_linkup = 1;
   11766 		} else {
   11767 			if (i == WM_LINKUP_TIMEOUT)
   11768 				wm_check_for_link(sc);
   11769 			/* Link is down. */
   11770 			DPRINTF(WM_DEBUG_LINK,
   11771 			    ("%s: LINK: set media -> link down\n",
   11772 				device_xname(sc->sc_dev)));
   11773 			sc->sc_tbi_linkup = 0;
   11774 		}
   11775 	} else {
   11776 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11777 			device_xname(sc->sc_dev)));
   11778 		sc->sc_tbi_linkup = 0;
   11779 	}
   11780 
   11781 	wm_tbi_serdes_set_linkled(sc);
   11782 
   11783 	return 0;
   11784 }
   11785 
   11786 /*
   11787  * wm_tbi_mediastatus:	[ifmedia interface function]
   11788  *
   11789  *	Get the current interface media status on a 1000BASE-X device.
   11790  */
   11791 static void
   11792 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11793 {
   11794 	struct wm_softc *sc = ifp->if_softc;
   11795 	uint32_t ctrl, status;
   11796 
   11797 	ifmr->ifm_status = IFM_AVALID;
   11798 	ifmr->ifm_active = IFM_ETHER;
   11799 
   11800 	status = CSR_READ(sc, WMREG_STATUS);
   11801 	if ((status & STATUS_LU) == 0) {
   11802 		ifmr->ifm_active |= IFM_NONE;
   11803 		return;
   11804 	}
   11805 
   11806 	ifmr->ifm_status |= IFM_ACTIVE;
   11807 	/* Only 82545 is LX */
   11808 	if (sc->sc_type == WM_T_82545)
   11809 		ifmr->ifm_active |= IFM_1000_LX;
   11810 	else
   11811 		ifmr->ifm_active |= IFM_1000_SX;
   11812 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11813 		ifmr->ifm_active |= IFM_FDX;
   11814 	else
   11815 		ifmr->ifm_active |= IFM_HDX;
   11816 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11817 	if (ctrl & CTRL_RFCE)
   11818 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11819 	if (ctrl & CTRL_TFCE)
   11820 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11821 }
   11822 
   11823 /* XXX TBI only */
   11824 static int
   11825 wm_check_for_link(struct wm_softc *sc)
   11826 {
   11827 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11828 	uint32_t rxcw;
   11829 	uint32_t ctrl;
   11830 	uint32_t status;
   11831 	bool signal;
   11832 
   11833 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11834 		device_xname(sc->sc_dev), __func__));
   11835 
   11836 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11837 		/* XXX need some work for >= 82571 */
   11838 		if (sc->sc_type >= WM_T_82571) {
   11839 			sc->sc_tbi_linkup = 1;
   11840 			return 0;
   11841 		}
   11842 	}
   11843 
   11844 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11845 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11846 	status = CSR_READ(sc, WMREG_STATUS);
   11847 	signal = wm_tbi_havesignal(sc, ctrl);
   11848 
   11849 	DPRINTF(WM_DEBUG_LINK,
   11850 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11851 		device_xname(sc->sc_dev), __func__, signal,
   11852 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11853 
   11854 	/*
   11855 	 * SWDPIN   LU RXCW
   11856 	 *	0    0	  0
   11857 	 *	0    0	  1	(should not happen)
   11858 	 *	0    1	  0	(should not happen)
   11859 	 *	0    1	  1	(should not happen)
   11860 	 *	1    0	  0	Disable autonego and force linkup
   11861 	 *	1    0	  1	got /C/ but not linkup yet
   11862 	 *	1    1	  0	(linkup)
   11863 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11864 	 *
   11865 	 */
   11866 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11867 		DPRINTF(WM_DEBUG_LINK,
   11868 		    ("%s: %s: force linkup and fullduplex\n",
   11869 			device_xname(sc->sc_dev), __func__));
   11870 		sc->sc_tbi_linkup = 0;
   11871 		/* Disable auto-negotiation in the TXCW register */
   11872 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11873 
   11874 		/*
   11875 		 * Force link-up and also force full-duplex.
   11876 		 *
		 * NOTE: The hardware updated CTRL's TFCE and RFCE
		 * automatically, so update sc->sc_ctrl from it.
   11879 		 */
   11880 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11881 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11882 	} else if (((status & STATUS_LU) != 0)
   11883 	    && ((rxcw & RXCW_C) != 0)
   11884 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11885 		sc->sc_tbi_linkup = 1;
   11886 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11887 			device_xname(sc->sc_dev),
   11888 			__func__));
   11889 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11890 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11891 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11893 			device_xname(sc->sc_dev), __func__));
   11894 	} else {
   11895 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11896 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11897 			status));
   11898 	}
   11899 
   11900 	return 0;
   11901 }
   11902 
   11903 /*
   11904  * wm_tbi_tick:
   11905  *
   11906  *	Check the link on TBI devices.
   11907  *	This function acts as mii_tick().
   11908  */
   11909 static void
   11910 wm_tbi_tick(struct wm_softc *sc)
   11911 {
   11912 	struct mii_data *mii = &sc->sc_mii;
   11913 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11914 	uint32_t status;
   11915 
   11916 	KASSERT(WM_CORE_LOCKED(sc));
   11917 
   11918 	status = CSR_READ(sc, WMREG_STATUS);
   11919 
   11920 	/* XXX is this needed? */
   11921 	(void)CSR_READ(sc, WMREG_RXCW);
   11922 	(void)CSR_READ(sc, WMREG_CTRL);
   11923 
   11924 	/* set link status */
   11925 	if ((status & STATUS_LU) == 0) {
   11926 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11927 			device_xname(sc->sc_dev)));
   11928 		sc->sc_tbi_linkup = 0;
   11929 	} else if (sc->sc_tbi_linkup == 0) {
   11930 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11931 			device_xname(sc->sc_dev),
   11932 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11933 		sc->sc_tbi_linkup = 1;
   11934 		sc->sc_tbi_serdes_ticks = 0;
   11935 	}
   11936 
   11937 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11938 		goto setled;
   11939 
   11940 	if ((status & STATUS_LU) == 0) {
   11941 		sc->sc_tbi_linkup = 0;
   11942 		/* If the timer expired, retry autonegotiation */
   11943 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11944 		    && (++sc->sc_tbi_serdes_ticks
   11945 			>= sc->sc_tbi_serdes_anegticks)) {
   11946 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11947 			sc->sc_tbi_serdes_ticks = 0;
   11948 			/*
   11949 			 * Reset the link, and let autonegotiation do
   11950 			 * its thing
   11951 			 */
   11952 			sc->sc_ctrl |= CTRL_LRST;
   11953 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11954 			CSR_WRITE_FLUSH(sc);
   11955 			delay(1000);
   11956 			sc->sc_ctrl &= ~CTRL_LRST;
   11957 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11958 			CSR_WRITE_FLUSH(sc);
   11959 			delay(1000);
   11960 			CSR_WRITE(sc, WMREG_TXCW,
   11961 			    sc->sc_txcw & ~TXCW_ANE);
   11962 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11963 		}
   11964 	}
   11965 
   11966 setled:
   11967 	wm_tbi_serdes_set_linkled(sc);
   11968 }
   11969 
   11970 /* SERDES related */
   11971 static void
   11972 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11973 {
   11974 	uint32_t reg;
   11975 
   11976 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11977 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11978 		return;
   11979 
   11980 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11981 	reg |= PCS_CFG_PCS_EN;
   11982 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11983 
   11984 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11985 	reg &= ~CTRL_EXT_SWDPIN(3);
   11986 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11987 	CSR_WRITE_FLUSH(sc);
   11988 }
   11989 
   11990 static int
   11991 wm_serdes_mediachange(struct ifnet *ifp)
   11992 {
   11993 	struct wm_softc *sc = ifp->if_softc;
   11994 	bool pcs_autoneg = true; /* XXX */
   11995 	uint32_t ctrl_ext, pcs_lctl, reg;
   11996 
   11997 	/* XXX Currently, this function is not called on 8257[12] */
   11998 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11999 	    || (sc->sc_type >= WM_T_82575))
   12000 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12001 
   12002 	wm_serdes_power_up_link_82575(sc);
   12003 
   12004 	sc->sc_ctrl |= CTRL_SLU;
   12005 
   12006 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12007 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12008 
   12009 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12010 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12011 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12012 	case CTRL_EXT_LINK_MODE_SGMII:
   12013 		pcs_autoneg = true;
   12014 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12015 		break;
   12016 	case CTRL_EXT_LINK_MODE_1000KX:
   12017 		pcs_autoneg = false;
   12018 		/* FALLTHROUGH */
   12019 	default:
   12020 		if ((sc->sc_type == WM_T_82575)
   12021 		    || (sc->sc_type == WM_T_82576)) {
   12022 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12023 				pcs_autoneg = false;
   12024 		}
   12025 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12026 		    | CTRL_FRCFDX;
   12027 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12028 	}
   12029 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12030 
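	/*
	 * Either let the PCS autonegotiate, advertising symmetric and
	 * asymmetric pause, or force the speed/duplex and flow control
	 * settings chosen above.
	 */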
   12031 	if (pcs_autoneg) {
   12032 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12033 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12034 
   12035 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12036 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12037 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12038 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12039 	} else
   12040 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12041 
   12042 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12043 
   12045 	return 0;
   12046 }
   12047 
   12048 static void
   12049 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12050 {
   12051 	struct wm_softc *sc = ifp->if_softc;
   12052 	struct mii_data *mii = &sc->sc_mii;
   12053 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12054 	uint32_t pcs_adv, pcs_lpab, reg;
   12055 
   12056 	ifmr->ifm_status = IFM_AVALID;
   12057 	ifmr->ifm_active = IFM_ETHER;
   12058 
   12059 	/* Check PCS */
   12060 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12061 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12062 		ifmr->ifm_active |= IFM_NONE;
   12063 		sc->sc_tbi_linkup = 0;
   12064 		goto setled;
   12065 	}
   12066 
   12067 	sc->sc_tbi_linkup = 1;
   12068 	ifmr->ifm_status |= IFM_ACTIVE;
   12069 	if (sc->sc_type == WM_T_I354) {
   12070 		uint32_t status;
   12071 
   12072 		status = CSR_READ(sc, WMREG_STATUS);
   12073 		if (((status & STATUS_2P5_SKU) != 0)
   12074 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12075 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   12076 		} else
   12077 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   12078 	} else {
   12079 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12080 		case PCS_LSTS_SPEED_10:
   12081 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12082 			break;
   12083 		case PCS_LSTS_SPEED_100:
   12084 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12085 			break;
   12086 		case PCS_LSTS_SPEED_1000:
   12087 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12088 			break;
   12089 		default:
   12090 			device_printf(sc->sc_dev, "Unknown speed\n");
   12091 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12092 			break;
   12093 		}
   12094 	}
   12095 	if ((reg & PCS_LSTS_FDX) != 0)
   12096 		ifmr->ifm_active |= IFM_FDX;
   12097 	else
   12098 		ifmr->ifm_active |= IFM_HDX;
   12099 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12100 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12101 		/* Check flow */
   12102 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12103 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12104 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12105 			goto setled;
   12106 		}
   12107 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12108 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12109 		DPRINTF(WM_DEBUG_LINK,
   12110 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
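		/*
		 * Resolve flow control from our advertisement (pcs_adv)
		 * and the link partner's ability (pcs_lpab) following the
		 * usual IEEE 802.3 pause resolution rules.
		 */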
   12111 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12112 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12113 			mii->mii_media_active |= IFM_FLOW
   12114 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12115 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12116 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12117 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12118 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12119 			mii->mii_media_active |= IFM_FLOW
   12120 			    | IFM_ETH_TXPAUSE;
   12121 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12122 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12123 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12124 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12125 			mii->mii_media_active |= IFM_FLOW
   12126 			    | IFM_ETH_RXPAUSE;
   12127 		}
   12128 	}
   12129 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12130 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12131 setled:
   12132 	wm_tbi_serdes_set_linkled(sc);
   12133 }
   12134 
   12135 /*
   12136  * wm_serdes_tick:
   12137  *
   12138  *	Check the link on serdes devices.
   12139  */
   12140 static void
   12141 wm_serdes_tick(struct wm_softc *sc)
   12142 {
   12143 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12144 	struct mii_data *mii = &sc->sc_mii;
   12145 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12146 	uint32_t reg;
   12147 
   12148 	KASSERT(WM_CORE_LOCKED(sc));
   12149 
   12150 	mii->mii_media_status = IFM_AVALID;
   12151 	mii->mii_media_active = IFM_ETHER;
   12152 
   12153 	/* Check PCS */
   12154 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12155 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12156 		mii->mii_media_status |= IFM_ACTIVE;
   12157 		sc->sc_tbi_linkup = 1;
   12158 		sc->sc_tbi_serdes_ticks = 0;
   12159 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12160 		if ((reg & PCS_LSTS_FDX) != 0)
   12161 			mii->mii_media_active |= IFM_FDX;
   12162 		else
   12163 			mii->mii_media_active |= IFM_HDX;
   12164 	} else {
   12165 		mii->mii_media_status |= IFM_NONE;
   12166 		sc->sc_tbi_linkup = 0;
   12167 		/* If the timer expired, retry autonegotiation */
   12168 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12169 		    && (++sc->sc_tbi_serdes_ticks
   12170 			>= sc->sc_tbi_serdes_anegticks)) {
   12171 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12172 			sc->sc_tbi_serdes_ticks = 0;
   12173 			/* XXX */
   12174 			wm_serdes_mediachange(ifp);
   12175 		}
   12176 	}
   12177 
   12178 	wm_tbi_serdes_set_linkled(sc);
   12179 }
   12180 
   12181 /* SFP related */
   12182 
   12183 static int
   12184 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12185 {
   12186 	uint32_t i2ccmd;
   12187 	int i;
   12188 
   12189 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12190 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12191 
   12192 	/* Poll the ready bit */
   12193 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12194 		delay(50);
   12195 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12196 		if (i2ccmd & I2CCMD_READY)
   12197 			break;
   12198 	}
   12199 	if ((i2ccmd & I2CCMD_READY) == 0)
   12200 		return -1;
   12201 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12202 		return -1;
   12203 
   12204 	*data = i2ccmd & 0x00ff;
   12205 
   12206 	return 0;
   12207 }
   12208 
   12209 static uint32_t
   12210 wm_sfp_get_media_type(struct wm_softc *sc)
   12211 {
   12212 	uint32_t ctrl_ext;
   12213 	uint8_t val = 0;
   12214 	int timeout = 3;
   12215 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12216 	int rv = -1;
   12217 
   12218 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12219 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12220 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12221 	CSR_WRITE_FLUSH(sc);
   12222 
   12223 	/* Read SFP module data */
   12224 	while (timeout) {
   12225 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12226 		if (rv == 0)
   12227 			break;
   12228 		delay(100*1000); /* XXX too big */
   12229 		timeout--;
   12230 	}
   12231 	if (rv != 0)
   12232 		goto out;
   12233 	switch (val) {
   12234 	case SFF_SFP_ID_SFF:
   12235 		aprint_normal_dev(sc->sc_dev,
   12236 		    "Module/Connector soldered to board\n");
   12237 		break;
   12238 	case SFF_SFP_ID_SFP:
   12239 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12240 		break;
   12241 	case SFF_SFP_ID_UNKNOWN:
   12242 		goto out;
   12243 	default:
   12244 		break;
   12245 	}
   12246 
   12247 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
	if (rv != 0)
		goto out;
   12251 
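	/*
	 * Map the SFF Ethernet compliance codes to a media type:
	 * 1000BASE-SX/LX modules are driven as SERDES, 1000BASE-T as
	 * copper over SGMII, and 100BASE-FX as SERDES with the SGMII
	 * flag set.
	 */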
   12252 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12253 		mediatype = WM_MEDIATYPE_SERDES;
   12254 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12255 		sc->sc_flags |= WM_F_SGMII;
   12256 		mediatype = WM_MEDIATYPE_COPPER;
   12257 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12258 		sc->sc_flags |= WM_F_SGMII;
   12259 		mediatype = WM_MEDIATYPE_SERDES;
   12260 	}
   12261 
   12262 out:
   12263 	/* Restore I2C interface setting */
   12264 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12265 
   12266 	return mediatype;
   12267 }
   12268 
   12269 /*
   12270  * NVM related.
   12271  * Microwire, SPI (w/wo EERD) and Flash.
   12272  */
   12273 
   12274 /* Both spi and uwire */
   12275 
   12276 /*
   12277  * wm_eeprom_sendbits:
   12278  *
   12279  *	Send a series of bits to the EEPROM.
   12280  */
   12281 static void
   12282 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12283 {
   12284 	uint32_t reg;
   12285 	int x;
   12286 
   12287 	reg = CSR_READ(sc, WMREG_EECD);
   12288 
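	/*
	 * Clock the bits out MSB first: present each bit on DI, then
	 * pulse SK high and low while the data line is held stable.
	 */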
   12289 	for (x = nbits; x > 0; x--) {
   12290 		if (bits & (1U << (x - 1)))
   12291 			reg |= EECD_DI;
   12292 		else
   12293 			reg &= ~EECD_DI;
   12294 		CSR_WRITE(sc, WMREG_EECD, reg);
   12295 		CSR_WRITE_FLUSH(sc);
   12296 		delay(2);
   12297 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12298 		CSR_WRITE_FLUSH(sc);
   12299 		delay(2);
   12300 		CSR_WRITE(sc, WMREG_EECD, reg);
   12301 		CSR_WRITE_FLUSH(sc);
   12302 		delay(2);
   12303 	}
   12304 }
   12305 
   12306 /*
   12307  * wm_eeprom_recvbits:
   12308  *
   12309  *	Receive a series of bits from the EEPROM.
   12310  */
   12311 static void
   12312 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12313 {
   12314 	uint32_t reg, val;
   12315 	int x;
   12316 
   12317 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12318 
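	/* Clock the bits in MSB first: raise SK, sample DO, lower SK. */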
   12319 	val = 0;
   12320 	for (x = nbits; x > 0; x--) {
   12321 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12322 		CSR_WRITE_FLUSH(sc);
   12323 		delay(2);
   12324 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12325 			val |= (1U << (x - 1));
   12326 		CSR_WRITE(sc, WMREG_EECD, reg);
   12327 		CSR_WRITE_FLUSH(sc);
   12328 		delay(2);
   12329 	}
   12330 	*valp = val;
   12331 }
   12332 
   12333 /* Microwire */
   12334 
   12335 /*
   12336  * wm_nvm_read_uwire:
   12337  *
   12338  *	Read a word from the EEPROM using the MicroWire protocol.
   12339  */
   12340 static int
   12341 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12342 {
   12343 	uint32_t reg, val;
   12344 	int i;
   12345 
   12346 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12347 		device_xname(sc->sc_dev), __func__));
   12348 
   12349 	if (sc->nvm.acquire(sc) != 0)
   12350 		return -1;
   12351 
   12352 	for (i = 0; i < wordcnt; i++) {
   12353 		/* Clear SK and DI. */
   12354 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12355 		CSR_WRITE(sc, WMREG_EECD, reg);
   12356 
   12357 		/*
   12358 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12359 		 * and Xen.
   12360 		 *
   12361 		 * We use this workaround only for 82540 because qemu's
   12362 		 * e1000 act as 82540.
   12363 		 */
   12364 		if (sc->sc_type == WM_T_82540) {
   12365 			reg |= EECD_SK;
   12366 			CSR_WRITE(sc, WMREG_EECD, reg);
   12367 			reg &= ~EECD_SK;
   12368 			CSR_WRITE(sc, WMREG_EECD, reg);
   12369 			CSR_WRITE_FLUSH(sc);
   12370 			delay(2);
   12371 		}
   12372 		/* XXX: end of workaround */
   12373 
   12374 		/* Set CHIP SELECT. */
   12375 		reg |= EECD_CS;
   12376 		CSR_WRITE(sc, WMREG_EECD, reg);
   12377 		CSR_WRITE_FLUSH(sc);
   12378 		delay(2);
   12379 
   12380 		/* Shift in the READ command. */
   12381 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12382 
   12383 		/* Shift in address. */
   12384 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12385 
   12386 		/* Shift out the data. */
   12387 		wm_eeprom_recvbits(sc, &val, 16);
   12388 		data[i] = val & 0xffff;
   12389 
   12390 		/* Clear CHIP SELECT. */
   12391 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12392 		CSR_WRITE(sc, WMREG_EECD, reg);
   12393 		CSR_WRITE_FLUSH(sc);
   12394 		delay(2);
   12395 	}
   12396 
   12397 	sc->nvm.release(sc);
   12398 	return 0;
   12399 }
   12400 
   12401 /* SPI */
   12402 
   12403 /*
   12404  * Set SPI and FLASH related information from the EECD register.
   12405  * For 82541 and 82547, the word size is taken from EEPROM.
   12406  */
   12407 static int
   12408 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12409 {
   12410 	int size;
   12411 	uint32_t reg;
   12412 	uint16_t data;
   12413 
   12414 	reg = CSR_READ(sc, WMREG_EECD);
   12415 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12416 
   12417 	/* Read the size of NVM from EECD by default */
   12418 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12419 	switch (sc->sc_type) {
   12420 	case WM_T_82541:
   12421 	case WM_T_82541_2:
   12422 	case WM_T_82547:
   12423 	case WM_T_82547_2:
   12424 		/* Set dummy value to access EEPROM */
   12425 		sc->sc_nvm_wordsize = 64;
   12426 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12427 			aprint_error_dev(sc->sc_dev,
   12428 			    "%s: failed to read EEPROM size\n", __func__);
   12429 		}
   12430 		reg = data;
   12431 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12432 		if (size == 0)
   12433 			size = 6; /* 64 word size */
   12434 		else
   12435 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12436 		break;
   12437 	case WM_T_80003:
   12438 	case WM_T_82571:
   12439 	case WM_T_82572:
   12440 	case WM_T_82573: /* SPI case */
   12441 	case WM_T_82574: /* SPI case */
   12442 	case WM_T_82583: /* SPI case */
   12443 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12444 		if (size > 14)
   12445 			size = 14;
   12446 		break;
   12447 	case WM_T_82575:
   12448 	case WM_T_82576:
   12449 	case WM_T_82580:
   12450 	case WM_T_I350:
   12451 	case WM_T_I354:
   12452 	case WM_T_I210:
   12453 	case WM_T_I211:
   12454 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12455 		if (size > 15)
   12456 			size = 15;
   12457 		break;
   12458 	default:
   12459 		aprint_error_dev(sc->sc_dev,
   12460 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12461 		return -1;
   12463 	}
   12464 
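	/* "size" is the log2 of the word count; convert it to words. */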
   12465 	sc->sc_nvm_wordsize = 1 << size;
   12466 
   12467 	return 0;
   12468 }
   12469 
   12470 /*
   12471  * wm_nvm_ready_spi:
   12472  *
   12473  *	Wait for a SPI EEPROM to be ready for commands.
   12474  */
   12475 static int
   12476 wm_nvm_ready_spi(struct wm_softc *sc)
   12477 {
   12478 	uint32_t val;
   12479 	int usec;
   12480 
   12481 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12482 		device_xname(sc->sc_dev), __func__));
   12483 
   12484 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12485 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12486 		wm_eeprom_recvbits(sc, &val, 8);
   12487 		if ((val & SPI_SR_RDY) == 0)
   12488 			break;
   12489 	}
   12490 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12492 		return -1;
   12493 	}
   12494 	return 0;
   12495 }
   12496 
   12497 /*
   12498  * wm_nvm_read_spi:
   12499  *
 *	Read a word from the EEPROM using the SPI protocol.
   12501  */
   12502 static int
   12503 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12504 {
   12505 	uint32_t reg, val;
   12506 	int i;
   12507 	uint8_t opc;
   12508 	int rv = 0;
   12509 
   12510 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12511 		device_xname(sc->sc_dev), __func__));
   12512 
   12513 	if (sc->nvm.acquire(sc) != 0)
   12514 		return -1;
   12515 
   12516 	/* Clear SK and CS. */
   12517 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12518 	CSR_WRITE(sc, WMREG_EECD, reg);
   12519 	CSR_WRITE_FLUSH(sc);
   12520 	delay(2);
   12521 
   12522 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12523 		goto out;
   12524 
   12525 	/* Toggle CS to flush commands. */
   12526 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12527 	CSR_WRITE_FLUSH(sc);
   12528 	delay(2);
   12529 	CSR_WRITE(sc, WMREG_EECD, reg);
   12530 	CSR_WRITE_FLUSH(sc);
   12531 	delay(2);
   12532 
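	/*
	 * The EEPROM is byte-addressed (hence "word << 1" below). Parts
	 * with 8 address bits carry the ninth address bit (A8) in the
	 * opcode, so set it when reading word 128 or above.
	 */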
   12533 	opc = SPI_OPC_READ;
   12534 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12535 		opc |= SPI_OPC_A8;
   12536 
   12537 	wm_eeprom_sendbits(sc, opc, 8);
   12538 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12539 
   12540 	for (i = 0; i < wordcnt; i++) {
   12541 		wm_eeprom_recvbits(sc, &val, 16);
   12542 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12543 	}
   12544 
   12545 	/* Raise CS and clear SK. */
   12546 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12547 	CSR_WRITE(sc, WMREG_EECD, reg);
   12548 	CSR_WRITE_FLUSH(sc);
   12549 	delay(2);
   12550 
   12551 out:
   12552 	sc->nvm.release(sc);
   12553 	return rv;
   12554 }
   12555 
   12556 /* Using with EERD */
   12557 
   12558 static int
   12559 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12560 {
   12561 	uint32_t attempts = 100000;
   12562 	uint32_t i, reg = 0;
   12563 	int32_t done = -1;
   12564 
   12565 	for (i = 0; i < attempts; i++) {
   12566 		reg = CSR_READ(sc, rw);
   12567 
   12568 		if (reg & EERD_DONE) {
   12569 			done = 0;
   12570 			break;
   12571 		}
   12572 		delay(5);
   12573 	}
   12574 
   12575 	return done;
   12576 }
   12577 
   12578 static int
   12579 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12580 {
   12581 	int i, eerd = 0;
   12582 	int rv = 0;
   12583 
   12584 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12585 		device_xname(sc->sc_dev), __func__));
   12586 
   12587 	if (sc->nvm.acquire(sc) != 0)
   12588 		return -1;
   12589 
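	/*
	 * EERD gives word-at-a-time access: write the word address with
	 * the START bit set, poll for DONE, then read the data field.
	 */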
   12590 	for (i = 0; i < wordcnt; i++) {
   12591 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12592 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12593 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12594 		if (rv != 0) {
   12595 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12597 			break;
   12598 		}
   12599 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12600 	}
   12601 
   12602 	sc->nvm.release(sc);
   12603 	return rv;
   12604 }
   12605 
   12606 /* Flash */
   12607 
   12608 static int
   12609 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12610 {
   12611 	uint32_t eecd;
   12612 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12613 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12614 	uint32_t nvm_dword = 0;
   12615 	uint8_t sig_byte = 0;
   12616 	int rv;
   12617 
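	/*
	 * Each flash bank carries a signature in the high byte of its
	 * ICH_NVM_SIG_WORD; the bank whose signature matches
	 * ICH_NVM_SIG_VALUE holds the valid NVM image.
	 */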
   12618 	switch (sc->sc_type) {
   12619 	case WM_T_PCH_SPT:
   12620 	case WM_T_PCH_CNP:
   12621 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12622 		act_offset = ICH_NVM_SIG_WORD * 2;
   12623 
   12624 		/* set bank to 0 in case flash read fails. */
   12625 		*bank = 0;
   12626 
   12627 		/* Check bank 0 */
   12628 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12629 		if (rv != 0)
   12630 			return rv;
   12631 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12632 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12633 			*bank = 0;
   12634 			return 0;
   12635 		}
   12636 
   12637 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12641 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12642 			*bank = 1;
   12643 			return 0;
   12644 		}
   12645 		aprint_error_dev(sc->sc_dev,
   12646 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12647 		return -1;
   12648 	case WM_T_ICH8:
   12649 	case WM_T_ICH9:
   12650 		eecd = CSR_READ(sc, WMREG_EECD);
   12651 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12652 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12653 			return 0;
   12654 		}
   12655 		/* FALLTHROUGH */
   12656 	default:
   12657 		/* Default to 0 */
   12658 		*bank = 0;
   12659 
   12660 		/* Check bank 0 */
   12661 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12662 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12663 			*bank = 0;
   12664 			return 0;
   12665 		}
   12666 
   12667 		/* Check bank 1 */
   12668 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12669 		    &sig_byte);
   12670 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12671 			*bank = 1;
   12672 			return 0;
   12673 		}
   12674 	}
   12675 
   12676 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12677 		device_xname(sc->sc_dev)));
   12678 	return -1;
   12679 }
   12680 
   12681 /******************************************************************************
   12682  * This function does initial flash setup so that a new read/write/erase cycle
   12683  * can be started.
   12684  *
   12685  * sc - The pointer to the hw structure
   12686  ****************************************************************************/
   12687 static int32_t
   12688 wm_ich8_cycle_init(struct wm_softc *sc)
   12689 {
   12690 	uint16_t hsfsts;
   12691 	int32_t error = 1;
   12692 	int32_t i     = 0;
   12693 
   12694 	if (sc->sc_type >= WM_T_PCH_SPT)
   12695 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12696 	else
   12697 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12698 
	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   12700 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12701 		return error;
   12702 
   12703 	/* Clear FCERR in Hw status by writing 1 */
   12704 	/* Clear DAEL in Hw status by writing a 1 */
   12705 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12706 
   12707 	if (sc->sc_type >= WM_T_PCH_SPT)
   12708 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12709 	else
   12710 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12711 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then indicate whether a cycle is in
	 * progress or has completed. We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that two threads' accesses to those bits are serialized, or
	 * some other way to keep two threads from starting a cycle at the
	 * same time.
	 */
   12722 
   12723 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12724 		/*
   12725 		 * There is no cycle running at present, so we can start a
   12726 		 * cycle
   12727 		 */
   12728 
   12729 		/* Begin by setting Flash Cycle Done. */
   12730 		hsfsts |= HSFSTS_DONE;
   12731 		if (sc->sc_type >= WM_T_PCH_SPT)
   12732 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12733 			    hsfsts & 0xffffUL);
   12734 		else
   12735 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12736 		error = 0;
   12737 	} else {
   12738 		/*
		 * Otherwise poll for some time so the current cycle has a
   12740 		 * chance to end before giving up.
   12741 		 */
   12742 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12743 			if (sc->sc_type >= WM_T_PCH_SPT)
   12744 				hsfsts = ICH8_FLASH_READ32(sc,
   12745 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12746 			else
   12747 				hsfsts = ICH8_FLASH_READ16(sc,
   12748 				    ICH_FLASH_HSFSTS);
   12749 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12750 				error = 0;
   12751 				break;
   12752 			}
   12753 			delay(1);
   12754 		}
   12755 		if (error == 0) {
   12756 			/*
			 * The previous cycle ended before the timeout, so
			 * now set the Flash Cycle Done bit.
   12759 			 */
   12760 			hsfsts |= HSFSTS_DONE;
   12761 			if (sc->sc_type >= WM_T_PCH_SPT)
   12762 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12763 				    hsfsts & 0xffffUL);
   12764 			else
   12765 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12766 				    hsfsts);
   12767 		}
   12768 	}
   12769 	return error;
   12770 }
   12771 
   12772 /******************************************************************************
   12773  * This function starts a flash cycle and waits for its completion
   12774  *
   12775  * sc - The pointer to the hw structure
   12776  ****************************************************************************/
   12777 static int32_t
   12778 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12779 {
   12780 	uint16_t hsflctl;
   12781 	uint16_t hsfsts;
   12782 	int32_t error = 1;
   12783 	uint32_t i = 0;
   12784 
   12785 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12786 	if (sc->sc_type >= WM_T_PCH_SPT)
   12787 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12788 	else
   12789 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12790 	hsflctl |= HSFCTL_GO;
   12791 	if (sc->sc_type >= WM_T_PCH_SPT)
   12792 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12793 		    (uint32_t)hsflctl << 16);
   12794 	else
   12795 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12796 
   12797 	/* Wait till FDONE bit is set to 1 */
   12798 	do {
   12799 		if (sc->sc_type >= WM_T_PCH_SPT)
   12800 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12801 			    & 0xffffUL;
   12802 		else
   12803 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12804 		if (hsfsts & HSFSTS_DONE)
   12805 			break;
   12806 		delay(1);
   12807 		i++;
   12808 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12810 		error = 0;
   12811 
   12812 	return error;
   12813 }
   12814 
   12815 /******************************************************************************
   12816  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12817  *
   12818  * sc - The pointer to the hw structure
   12819  * index - The index of the byte or word to read.
   12820  * size - Size of data to read, 1=byte 2=word, 4=dword
   12821  * data - Pointer to the word to store the value read.
   12822  *****************************************************************************/
   12823 static int32_t
   12824 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12825     uint32_t size, uint32_t *data)
   12826 {
   12827 	uint16_t hsfsts;
   12828 	uint16_t hsflctl;
   12829 	uint32_t flash_linear_address;
   12830 	uint32_t flash_data = 0;
   12831 	int32_t error = 1;
   12832 	int32_t count = 0;
   12833 
	if (size < 1 || size > 4 || data == NULL ||
   12835 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12836 		return error;
   12837 
   12838 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12839 	    sc->sc_ich8_flash_base;
   12840 
   12841 	do {
   12842 		delay(1);
   12843 		/* Steps */
   12844 		error = wm_ich8_cycle_init(sc);
   12845 		if (error)
   12846 			break;
   12847 
   12848 		if (sc->sc_type >= WM_T_PCH_SPT)
   12849 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12850 			    >> 16;
   12851 		else
   12852 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is size - 1: 0/1/3 select 1-, 2- and 4-byte access */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12855 		    & HSFCTL_BCOUNT_MASK;
   12856 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12857 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12858 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash. Therefore, only 32-bit access is supported.
   12861 			 */
   12862 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12863 			    (uint32_t)hsflctl << 16);
   12864 		} else
   12865 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12866 
   12867 		/*
   12868 		 * Write the last 24 bits of index into Flash Linear address
   12869 		 * field in Flash Address
   12870 		 */
   12871 		/* TODO: TBD maybe check the index against the size of flash */
   12872 
   12873 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12874 
   12875 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12876 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result (shifted in
		 * least significant byte first) from Flash Data0.
		 */
   12883 		if (error == 0) {
   12884 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12885 			if (size == 1)
   12886 				*data = (uint8_t)(flash_data & 0x000000FF);
   12887 			else if (size == 2)
   12888 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12889 			else if (size == 4)
   12890 				*data = (uint32_t)flash_data;
   12891 			break;
   12892 		} else {
   12893 			/*
   12894 			 * If we've gotten here, then things are probably
   12895 			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try,
			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12898 			 */
   12899 			if (sc->sc_type >= WM_T_PCH_SPT)
   12900 				hsfsts = ICH8_FLASH_READ32(sc,
   12901 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12902 			else
   12903 				hsfsts = ICH8_FLASH_READ16(sc,
   12904 				    ICH_FLASH_HSFSTS);
   12905 
   12906 			if (hsfsts & HSFSTS_ERR) {
   12907 				/* Repeat for some time before giving up. */
   12908 				continue;
   12909 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12910 				break;
   12911 		}
   12912 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12913 
   12914 	return error;
   12915 }
   12916 
   12917 /******************************************************************************
   12918  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12919  *
   12920  * sc - pointer to wm_hw structure
   12921  * index - The index of the byte to read.
   12922  * data - Pointer to a byte to store the value read.
   12923  *****************************************************************************/
   12924 static int32_t
   12925 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12926 {
   12927 	int32_t status;
   12928 	uint32_t word = 0;
   12929 
   12930 	status = wm_read_ich8_data(sc, index, 1, &word);
   12931 	if (status == 0)
   12932 		*data = (uint8_t)word;
   12933 	else
   12934 		*data = 0;
   12935 
   12936 	return status;
   12937 }
   12938 
   12939 /******************************************************************************
   12940  * Reads a word from the NVM using the ICH8 flash access registers.
   12941  *
   12942  * sc - pointer to wm_hw structure
   12943  * index - The starting byte index of the word to read.
   12944  * data - Pointer to a word to store the value read.
   12945  *****************************************************************************/
   12946 static int32_t
   12947 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12948 {
   12949 	int32_t status;
   12950 	uint32_t word = 0;
   12951 
   12952 	status = wm_read_ich8_data(sc, index, 2, &word);
   12953 	if (status == 0)
   12954 		*data = (uint16_t)word;
   12955 	else
   12956 		*data = 0;
   12957 
   12958 	return status;
   12959 }
   12960 
   12961 /******************************************************************************
   12962  * Reads a dword from the NVM using the ICH8 flash access registers.
   12963  *
   12964  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   12967  *****************************************************************************/
   12968 static int32_t
   12969 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12970 {
   12971 	int32_t status;
   12972 
   12973 	status = wm_read_ich8_data(sc, index, 4, data);
   12974 	return status;
   12975 }
   12976 
   12977 /******************************************************************************
   12978  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12979  * register.
   12980  *
   12981  * sc - Struct containing variables accessed by shared code
   12982  * offset - offset of word in the EEPROM to read
   12983  * data - word read from the EEPROM
   12984  * words - number of words to read
   12985  *****************************************************************************/
   12986 static int
   12987 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12988 {
   12989 	int32_t	 rv = 0;
   12990 	uint32_t flash_bank = 0;
   12991 	uint32_t act_offset = 0;
   12992 	uint32_t bank_offset = 0;
   12993 	uint16_t word = 0;
   12994 	uint16_t i = 0;
   12995 
   12996 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12997 		device_xname(sc->sc_dev), __func__));
   12998 
   12999 	if (sc->nvm.acquire(sc) != 0)
   13000 		return -1;
   13001 
   13002 	/*
   13003 	 * We need to know which is the valid flash bank.  In the event
   13004 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13005 	 * managing flash_bank. So it cannot be trusted and needs
   13006 	 * to be updated with each read.
   13007 	 */
   13008 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13009 	if (rv) {
   13010 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13011 			device_xname(sc->sc_dev)));
   13012 		flash_bank = 0;
   13013 	}
   13014 
   13015 	/*
	 * If we're on bank 1, adjust the offset; the bank size is counted
	 * in words, hence the * 2 below.
   13018 	 */
   13019 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13020 
   13021 	for (i = 0; i < words; i++) {
   13022 		/* The NVM part needs a byte offset, hence * 2 */
   13023 		act_offset = bank_offset + ((offset + i) * 2);
   13024 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13025 		if (rv) {
   13026 			aprint_error_dev(sc->sc_dev,
   13027 			    "%s: failed to read NVM\n", __func__);
   13028 			break;
   13029 		}
   13030 		data[i] = word;
   13031 	}
   13032 
   13033 	sc->nvm.release(sc);
   13034 	return rv;
   13035 }
   13036 
   13037 /******************************************************************************
   13038  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13039  * register.
   13040  *
   13041  * sc - Struct containing variables accessed by shared code
   13042  * offset - offset of word in the EEPROM to read
   13043  * data - word read from the EEPROM
   13044  * words - number of words to read
   13045  *****************************************************************************/
   13046 static int
   13047 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13048 {
   13049 	int32_t	 rv = 0;
   13050 	uint32_t flash_bank = 0;
   13051 	uint32_t act_offset = 0;
   13052 	uint32_t bank_offset = 0;
   13053 	uint32_t dword = 0;
   13054 	uint16_t i = 0;
   13055 
   13056 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13057 		device_xname(sc->sc_dev), __func__));
   13058 
   13059 	if (sc->nvm.acquire(sc) != 0)
   13060 		return -1;
   13061 
   13062 	/*
   13063 	 * We need to know which is the valid flash bank.  In the event
   13064 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13065 	 * managing flash_bank. So it cannot be trusted and needs
   13066 	 * to be updated with each read.
   13067 	 */
   13068 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13069 	if (rv) {
   13070 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13071 			device_xname(sc->sc_dev)));
   13072 		flash_bank = 0;
   13073 	}
   13074 
   13075 	/*
	 * If we're on bank 1, adjust the offset; the bank size is counted
	 * in words, hence the * 2 below.
   13078 	 */
   13079 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13080 
   13081 	for (i = 0; i < words; i++) {
   13082 		/* The NVM part needs a byte offset, hence * 2 */
   13083 		act_offset = bank_offset + ((offset + i) * 2);
   13084 		/* but we must read dword aligned, so mask ... */
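		/*
		 * E.g. act_offset 0x42 reads the dword at 0x40 and then
		 * picks out the high word below.
		 */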
   13085 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13086 		if (rv) {
   13087 			aprint_error_dev(sc->sc_dev,
   13088 			    "%s: failed to read NVM\n", __func__);
   13089 			break;
   13090 		}
   13091 		/* ... and pick out low or high word */
   13092 		if ((act_offset & 0x2) == 0)
   13093 			data[i] = (uint16_t)(dword & 0xFFFF);
   13094 		else
   13095 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13096 	}
   13097 
   13098 	sc->nvm.release(sc);
   13099 	return rv;
   13100 }
   13101 
   13102 /* iNVM */
   13103 
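/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (OTP) records for a word autoload entry matching
 *	"address" and return its 16 bit data.  CSR autoload and RSA key
 *	records are skipped by their fixed sizes.
 */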
   13104 static int
   13105 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13106 {
	int32_t	 rv = -1;	/* Return an error if no record matches */
   13108 	uint32_t invm_dword;
   13109 	uint16_t i;
   13110 	uint8_t record_type, word_address;
   13111 
   13112 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13113 		device_xname(sc->sc_dev), __func__));
   13114 
   13115 	for (i = 0; i < INVM_SIZE; i++) {
   13116 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13117 		/* Get record type */
   13118 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13119 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13120 			break;
   13121 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13122 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13123 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13124 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13125 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13126 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13127 			if (word_address == address) {
   13128 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13129 				rv = 0;
   13130 				break;
   13131 			}
   13132 		}
   13133 	}
   13134 
   13135 	return rv;
   13136 }
   13137 
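/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM.  LED, CFG2, CFG4 and ID LED words fall
 *	back to the I211 defaults when unprogrammed; unmapped words read
 *	as NVM_RESERVED_WORD.
 */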
   13138 static int
   13139 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13140 {
   13141 	int rv = 0;
   13142 	int i;
   13143 
   13144 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13145 		device_xname(sc->sc_dev), __func__));
   13146 
   13147 	if (sc->nvm.acquire(sc) != 0)
   13148 		return -1;
   13149 
   13150 	for (i = 0; i < words; i++) {
   13151 		switch (offset + i) {
   13152 		case NVM_OFF_MACADDR:
   13153 		case NVM_OFF_MACADDR1:
   13154 		case NVM_OFF_MACADDR2:
   13155 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13156 			if (rv != 0) {
   13157 				data[i] = 0xffff;
   13158 				rv = -1;
   13159 			}
   13160 			break;
   13161 		case NVM_OFF_CFG2:
   13162 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13163 			if (rv != 0) {
   13164 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13165 				rv = 0;
   13166 			}
   13167 			break;
   13168 		case NVM_OFF_CFG4:
   13169 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13170 			if (rv != 0) {
   13171 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13172 				rv = 0;
   13173 			}
   13174 			break;
   13175 		case NVM_OFF_LED_1_CFG:
   13176 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13177 			if (rv != 0) {
   13178 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13179 				rv = 0;
   13180 			}
   13181 			break;
   13182 		case NVM_OFF_LED_0_2_CFG:
   13183 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13184 			if (rv != 0) {
   13185 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13186 				rv = 0;
   13187 			}
   13188 			break;
   13189 		case NVM_OFF_ID_LED_SETTINGS:
   13190 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13191 			if (rv != 0) {
   13192 				*data = ID_LED_RESERVED_FFFF;
   13193 				rv = 0;
   13194 			}
   13195 			break;
   13196 		default:
   13197 			DPRINTF(WM_DEBUG_NVM,
   13198 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13199 			*data = NVM_RESERVED_WORD;
   13200 			break;
   13201 		}
   13202 	}
   13203 
   13204 	sc->nvm.release(sc);
   13205 	return rv;
   13206 }
   13207 
/* NVM locking, type detection, checksum validation, version and read */
   13209 
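/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM.  On 82573/82574/82583,
 *	EECD bits 15 and 16 both set mean the device uses Flash instead.
 */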
   13210 static int
   13211 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13212 {
   13213 	uint32_t eecd = 0;
   13214 
   13215 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13216 	    || sc->sc_type == WM_T_82583) {
   13217 		eecd = CSR_READ(sc, WMREG_EECD);
   13218 
   13219 		/* Isolate bits 15 & 16 */
   13220 		eecd = ((eecd >> 15) & 0x03);
   13221 
   13222 		/* If both bits are set, device is Flash type */
   13223 		if (eecd == 0x03)
   13224 			return 0;
   13225 	}
   13226 	return 1;
   13227 }
   13228 
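/*
 * wm_nvm_flash_presence_i210:
 *
 *	Return 1 if the I21[01] EEC register reports an external Flash,
 *	otherwise 0 (iNVM only).
 */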
   13229 static int
   13230 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13231 {
   13232 	uint32_t eec;
   13233 
   13234 	eec = CSR_READ(sc, WMREG_EEC);
   13235 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13236 		return 1;
   13237 
   13238 	return 0;
   13239 }
   13240 
   13241 /*
   13242  * wm_nvm_validate_checksum
   13243  *
 * The checksum is defined so that the sum of the first 64 (16 bit)
 * words equals NVM_CHECKSUM.
   13245  */
   13246 static int
   13247 wm_nvm_validate_checksum(struct wm_softc *sc)
   13248 {
   13249 	uint16_t checksum;
   13250 	uint16_t eeprom_data;
   13251 #ifdef WM_DEBUG
   13252 	uint16_t csum_wordaddr, valid_checksum;
   13253 #endif
   13254 	int i;
   13255 
   13256 	checksum = 0;
   13257 
   13258 	/* Don't check for I211 */
   13259 	if (sc->sc_type == WM_T_I211)
   13260 		return 0;
   13261 
   13262 #ifdef WM_DEBUG
   13263 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13264 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13265 		csum_wordaddr = NVM_OFF_COMPAT;
   13266 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13267 	} else {
   13268 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13269 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13270 	}
   13271 
   13272 	/* Dump EEPROM image for debug */
   13273 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13274 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13275 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13276 		/* XXX PCH_SPT? */
   13277 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13278 		if ((eeprom_data & valid_checksum) == 0) {
   13279 			DPRINTF(WM_DEBUG_NVM,
   13280 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13281 				device_xname(sc->sc_dev), eeprom_data,
   13282 				    valid_checksum));
   13283 		}
   13284 	}
   13285 
   13286 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13287 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13288 		for (i = 0; i < NVM_SIZE; i++) {
   13289 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13290 				printf("XXXX ");
   13291 			else
   13292 				printf("%04hx ", eeprom_data);
   13293 			if (i % 8 == 7)
   13294 				printf("\n");
   13295 		}
   13296 	}
   13297 
   13298 #endif /* WM_DEBUG */
   13299 
   13300 	for (i = 0; i < NVM_SIZE; i++) {
   13301 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13302 			return 1;
   13303 		checksum += eeprom_data;
   13304 	}
   13305 
   13306 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13307 #ifdef WM_DEBUG
   13308 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13309 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13310 #endif
   13311 	}
   13312 
   13313 	return 0;
   13314 }
   13315 
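/* Decode the iNVM image version from iNVM word 61 */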
   13316 static void
   13317 wm_nvm_version_invm(struct wm_softc *sc)
   13318 {
   13319 	uint32_t dword;
   13320 
   13321 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  It may not be perfect, though...
   13325 	 *
   13326 	 * Example:
   13327 	 *
   13328 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13329 	 */
   13330 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13331 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13332 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13333 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13334 }
   13335 
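/*
 * wm_nvm_version:
 *
 *	Decode and print the NVM image version, and the option ROM
 *	version and image unique ID when they are available.
 */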
   13336 static void
   13337 wm_nvm_version(struct wm_softc *sc)
   13338 {
   13339 	uint16_t major, minor, build, patch;
   13340 	uint16_t uid0, uid1;
   13341 	uint16_t nvm_data;
   13342 	uint16_t off;
   13343 	bool check_version = false;
   13344 	bool check_optionrom = false;
   13345 	bool have_build = false;
   13346 	bool have_uid = true;
   13347 
   13348 	/*
   13349 	 * Version format:
   13350 	 *
   13351 	 * XYYZ
   13352 	 * X0YZ
   13353 	 * X0YY
   13354 	 *
   13355 	 * Example:
   13356 	 *
   13357 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13358 	 *	82571	0x50a6	5.10.6?
   13359 	 *	82572	0x506a	5.6.10?
   13360 	 *	82572EI	0x5069	5.6.9?
   13361 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13362 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13364 	 */
   13365 
   13366 	/*
   13367 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13370 	 */
   13371 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13372 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13373 		have_uid = false;
   13374 
   13375 	switch (sc->sc_type) {
   13376 	case WM_T_82571:
   13377 	case WM_T_82572:
   13378 	case WM_T_82574:
   13379 	case WM_T_82583:
   13380 		check_version = true;
   13381 		check_optionrom = true;
   13382 		have_build = true;
   13383 		break;
   13384 	case WM_T_82575:
   13385 	case WM_T_82576:
   13386 	case WM_T_82580:
   13387 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13388 			check_version = true;
   13389 		break;
   13390 	case WM_T_I211:
   13391 		wm_nvm_version_invm(sc);
   13392 		have_uid = false;
   13393 		goto printver;
   13394 	case WM_T_I210:
   13395 		if (!wm_nvm_flash_presence_i210(sc)) {
   13396 			wm_nvm_version_invm(sc);
   13397 			have_uid = false;
   13398 			goto printver;
   13399 		}
   13400 		/* FALLTHROUGH */
   13401 	case WM_T_I350:
   13402 	case WM_T_I354:
   13403 		check_version = true;
   13404 		check_optionrom = true;
   13405 		break;
   13406 	default:
   13407 		return;
   13408 	}
   13409 	if (check_version
   13410 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13411 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13412 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13413 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13414 			build = nvm_data & NVM_BUILD_MASK;
   13415 			have_build = true;
   13416 		} else
   13417 			minor = nvm_data & 0x00ff;
   13418 
		/* The minor is BCD-encoded; convert it to decimal */
   13420 		minor = (minor / 16) * 10 + (minor % 16);
   13421 		sc->sc_nvm_ver_major = major;
   13422 		sc->sc_nvm_ver_minor = minor;
   13423 
   13424 printver:
   13425 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13426 		    sc->sc_nvm_ver_minor);
   13427 		if (have_build) {
   13428 			sc->sc_nvm_ver_build = build;
   13429 			aprint_verbose(".%d", build);
   13430 		}
   13431 	}
   13432 
	/* Assume the Option ROM area is above NVM_SIZE */
   13434 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13435 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13436 		/* Option ROM Version */
   13437 		if ((off != 0x0000) && (off != 0xffff)) {
   13438 			int rv;
   13439 
   13440 			off += NVM_COMBO_VER_OFF;
   13441 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13442 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13443 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13444 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13445 				/* 16bits */
   13446 				major = uid0 >> 8;
   13447 				build = (uid0 << 8) | (uid1 >> 8);
   13448 				patch = uid1 & 0x00ff;
   13449 				aprint_verbose(", option ROM Version %d.%d.%d",
   13450 				    major, build, patch);
   13451 			}
   13452 		}
   13453 	}
   13454 
   13455 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13456 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13457 }
   13458 
   13459 /*
   13460  * wm_nvm_read:
   13461  *
   13462  *	Read data from the serial EEPROM.
   13463  */
   13464 static int
   13465 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13466 {
   13467 	int rv;
   13468 
   13469 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13470 		device_xname(sc->sc_dev), __func__));
   13471 
   13472 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13473 		return -1;
   13474 
   13475 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13476 
   13477 	return rv;
   13478 }
   13479 
   13480 /*
   13481  * Hardware semaphores.
 * Very complex...
   13483  */
   13484 
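/* Null ops, for devices which require no locking at all */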
   13485 static int
   13486 wm_get_null(struct wm_softc *sc)
   13487 {
   13488 
   13489 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13490 		device_xname(sc->sc_dev), __func__));
   13491 	return 0;
   13492 }
   13493 
   13494 static void
   13495 wm_put_null(struct wm_softc *sc)
   13496 {
   13497 
   13498 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13499 		device_xname(sc->sc_dev), __func__));
   13500 	return;
   13501 }
   13502 
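/* Request direct EEPROM access through the EECD REQ/GNT handshake */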
   13503 static int
   13504 wm_get_eecd(struct wm_softc *sc)
   13505 {
   13506 	uint32_t reg;
   13507 	int x;
   13508 
   13509 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13510 		device_xname(sc->sc_dev), __func__));
   13511 
   13512 	reg = CSR_READ(sc, WMREG_EECD);
   13513 
   13514 	/* Request EEPROM access. */
   13515 	reg |= EECD_EE_REQ;
   13516 	CSR_WRITE(sc, WMREG_EECD, reg);
   13517 
	/* ... and wait for it to be granted. */
   13519 	for (x = 0; x < 1000; x++) {
   13520 		reg = CSR_READ(sc, WMREG_EECD);
   13521 		if (reg & EECD_EE_GNT)
   13522 			break;
   13523 		delay(5);
   13524 	}
   13525 	if ((reg & EECD_EE_GNT) == 0) {
   13526 		aprint_error_dev(sc->sc_dev,
   13527 		    "could not acquire EEPROM GNT\n");
   13528 		reg &= ~EECD_EE_REQ;
   13529 		CSR_WRITE(sc, WMREG_EECD, reg);
   13530 		return -1;
   13531 	}
   13532 
   13533 	return 0;
   13534 }
   13535 
   13536 static void
   13537 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13538 {
   13539 
   13540 	*eecd |= EECD_SK;
   13541 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13542 	CSR_WRITE_FLUSH(sc);
   13543 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13544 		delay(1);
   13545 	else
   13546 		delay(50);
   13547 }
   13548 
   13549 static void
   13550 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13551 {
   13552 
   13553 	*eecd &= ~EECD_SK;
   13554 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13555 	CSR_WRITE_FLUSH(sc);
   13556 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13557 		delay(1);
   13558 	else
   13559 		delay(50);
   13560 }
   13561 
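/* Stop the NVM and drop the EECD REQ/GNT handshake */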
   13562 static void
   13563 wm_put_eecd(struct wm_softc *sc)
   13564 {
   13565 	uint32_t reg;
   13566 
   13567 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13568 		device_xname(sc->sc_dev), __func__));
   13569 
   13570 	/* Stop nvm */
   13571 	reg = CSR_READ(sc, WMREG_EECD);
   13572 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13573 		/* Pull CS high */
   13574 		reg |= EECD_CS;
   13575 		wm_nvm_eec_clock_lower(sc, &reg);
   13576 	} else {
   13577 		/* CS on Microwire is active-high */
   13578 		reg &= ~(EECD_CS | EECD_DI);
   13579 		CSR_WRITE(sc, WMREG_EECD, reg);
   13580 		wm_nvm_eec_clock_raise(sc, &reg);
   13581 		wm_nvm_eec_clock_lower(sc, &reg);
   13582 	}
   13583 
   13584 	reg = CSR_READ(sc, WMREG_EECD);
   13585 	reg &= ~EECD_EE_REQ;
   13586 	CSR_WRITE(sc, WMREG_EECD, reg);
   13587 
   13588 	return;
   13589 }
   13590 
   13591 /*
   13592  * Get hardware semaphore.
   13593  * Same as e1000_get_hw_semaphore_generic()
   13594  */
   13595 static int
   13596 wm_get_swsm_semaphore(struct wm_softc *sc)
   13597 {
   13598 	int32_t timeout;
   13599 	uint32_t swsm;
   13600 
   13601 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13602 		device_xname(sc->sc_dev), __func__));
   13603 	KASSERT(sc->sc_nvm_wordsize > 0);
   13604 
   13605 retry:
   13606 	/* Get the SW semaphore. */
   13607 	timeout = sc->sc_nvm_wordsize + 1;
   13608 	while (timeout) {
   13609 		swsm = CSR_READ(sc, WMREG_SWSM);
   13610 
   13611 		if ((swsm & SWSM_SMBI) == 0)
   13612 			break;
   13613 
   13614 		delay(50);
   13615 		timeout--;
   13616 	}
   13617 
   13618 	if (timeout == 0) {
   13619 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13620 			/*
   13621 			 * In rare circumstances, the SW semaphore may already
   13622 			 * be held unintentionally. Clear the semaphore once
   13623 			 * before giving up.
   13624 			 */
   13625 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13626 			wm_put_swsm_semaphore(sc);
   13627 			goto retry;
   13628 		}
   13629 		aprint_error_dev(sc->sc_dev,
   13630 		    "could not acquire SWSM SMBI\n");
   13631 		return 1;
   13632 	}
   13633 
   13634 	/* Get the FW semaphore. */
   13635 	timeout = sc->sc_nvm_wordsize + 1;
   13636 	while (timeout) {
   13637 		swsm = CSR_READ(sc, WMREG_SWSM);
   13638 		swsm |= SWSM_SWESMBI;
   13639 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13640 		/* If we managed to set the bit we got the semaphore. */
   13641 		swsm = CSR_READ(sc, WMREG_SWSM);
   13642 		if (swsm & SWSM_SWESMBI)
   13643 			break;
   13644 
   13645 		delay(50);
   13646 		timeout--;
   13647 	}
   13648 
   13649 	if (timeout == 0) {
   13650 		aprint_error_dev(sc->sc_dev,
   13651 		    "could not acquire SWSM SWESMBI\n");
   13652 		/* Release semaphores */
   13653 		wm_put_swsm_semaphore(sc);
   13654 		return 1;
   13655 	}
   13656 	return 0;
   13657 }
   13658 
   13659 /*
   13660  * Put hardware semaphore.
   13661  * Same as e1000_put_hw_semaphore_generic()
   13662  */
   13663 static void
   13664 wm_put_swsm_semaphore(struct wm_softc *sc)
   13665 {
   13666 	uint32_t swsm;
   13667 
   13668 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13669 		device_xname(sc->sc_dev), __func__));
   13670 
   13671 	swsm = CSR_READ(sc, WMREG_SWSM);
   13672 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13673 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13674 }
   13675 
   13676 /*
   13677  * Get SW/FW semaphore.
   13678  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13679  */
   13680 static int
   13681 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13682 {
   13683 	uint32_t swfw_sync;
   13684 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13685 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13686 	int timeout;
   13687 
   13688 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13689 		device_xname(sc->sc_dev), __func__));
   13690 
   13691 	if (sc->sc_type == WM_T_80003)
   13692 		timeout = 50;
   13693 	else
   13694 		timeout = 200;
   13695 
   13696 	while (timeout) {
   13697 		if (wm_get_swsm_semaphore(sc)) {
   13698 			aprint_error_dev(sc->sc_dev,
   13699 			    "%s: failed to get semaphore\n",
   13700 			    __func__);
   13701 			return 1;
   13702 		}
   13703 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13704 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13705 			swfw_sync |= swmask;
   13706 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13707 			wm_put_swsm_semaphore(sc);
   13708 			return 0;
   13709 		}
   13710 		wm_put_swsm_semaphore(sc);
   13711 		delay(5000);
   13712 		timeout--;
   13713 	}
   13714 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13715 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13716 	return 1;
   13717 }
   13718 
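/*
 * Put SW/FW semaphore.
 * Same as e1000_release_swfw_sync_{80003es2lan,82575}().
 */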
   13719 static void
   13720 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13721 {
   13722 	uint32_t swfw_sync;
   13723 
   13724 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13725 		device_xname(sc->sc_dev), __func__));
   13726 
   13727 	while (wm_get_swsm_semaphore(sc) != 0)
   13728 		continue;
   13729 
   13730 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13731 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13732 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13733 
   13734 	wm_put_swsm_semaphore(sc);
   13735 }
   13736 
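/*
 * Get the NVM access on 80003: the EEPROM SW/FW semaphore and, when
 * WM_F_LOCK_EECD is set, the EECD handshake as well.
 */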
   13737 static int
   13738 wm_get_nvm_80003(struct wm_softc *sc)
   13739 {
   13740 	int rv;
   13741 
   13742 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13743 		device_xname(sc->sc_dev), __func__));
   13744 
   13745 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13746 		aprint_error_dev(sc->sc_dev,
   13747 		    "%s: failed to get semaphore(SWFW)\n",
   13748 		    __func__);
   13749 		return rv;
   13750 	}
   13751 
   13752 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13753 	    && (rv = wm_get_eecd(sc)) != 0) {
   13754 		aprint_error_dev(sc->sc_dev,
   13755 		    "%s: failed to get semaphore(EECD)\n",
   13756 		    __func__);
   13757 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13758 		return rv;
   13759 	}
   13760 
   13761 	return 0;
   13762 }
   13763 
   13764 static void
   13765 wm_put_nvm_80003(struct wm_softc *sc)
   13766 {
   13767 
   13768 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13769 		device_xname(sc->sc_dev), __func__));
   13770 
   13771 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13772 		wm_put_eecd(sc);
   13773 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13774 }
   13775 
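/*
 * Get the NVM access on the 82571 family: the SWSM semaphore and,
 * except on 82573, the EECD handshake as well.
 */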
   13776 static int
   13777 wm_get_nvm_82571(struct wm_softc *sc)
   13778 {
   13779 	int rv;
   13780 
   13781 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13782 		device_xname(sc->sc_dev), __func__));
   13783 
   13784 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13785 		return rv;
   13786 
   13787 	switch (sc->sc_type) {
   13788 	case WM_T_82573:
   13789 		break;
   13790 	default:
   13791 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13792 			rv = wm_get_eecd(sc);
   13793 		break;
   13794 	}
   13795 
   13796 	if (rv != 0) {
   13797 		aprint_error_dev(sc->sc_dev,
   13798 		    "%s: failed to get semaphore\n",
   13799 		    __func__);
   13800 		wm_put_swsm_semaphore(sc);
   13801 	}
   13802 
   13803 	return rv;
   13804 }
   13805 
   13806 static void
   13807 wm_put_nvm_82571(struct wm_softc *sc)
   13808 {
   13809 
   13810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13811 		device_xname(sc->sc_dev), __func__));
   13812 
   13813 	switch (sc->sc_type) {
   13814 	case WM_T_82573:
   13815 		break;
   13816 	default:
   13817 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13818 			wm_put_eecd(sc);
   13819 		break;
   13820 	}
   13821 
   13822 	wm_put_swsm_semaphore(sc);
   13823 }
   13824 
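/* Get the PHY semaphore (82575 and newer), selected by the function ID */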
   13825 static int
   13826 wm_get_phy_82575(struct wm_softc *sc)
   13827 {
   13828 
   13829 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13830 		device_xname(sc->sc_dev), __func__));
   13831 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13832 }
   13833 
   13834 static void
   13835 wm_put_phy_82575(struct wm_softc *sc)
   13836 {
   13837 
   13838 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13839 		device_xname(sc->sc_dev), __func__));
   13840 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13841 }
   13842 
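/*
 * Get the SW/FW/HW semaphore by setting the MDIO software ownership
 * bit in EXTCNFCTR.  The PHY mutex covers both PHY and NVM access.
 */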
   13843 static int
   13844 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13845 {
   13846 	uint32_t ext_ctrl;
	int timeout;
   13848 
   13849 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13850 		device_xname(sc->sc_dev), __func__));
   13851 
   13852 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13853 	for (timeout = 0; timeout < 200; timeout++) {
   13854 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13855 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13856 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13857 
   13858 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13859 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13860 			return 0;
   13861 		delay(5000);
   13862 	}
   13863 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13864 	    device_xname(sc->sc_dev), ext_ctrl);
   13865 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13866 	return 1;
   13867 }
   13868 
   13869 static void
   13870 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13871 {
   13872 	uint32_t ext_ctrl;
   13873 
   13874 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13875 		device_xname(sc->sc_dev), __func__));
   13876 
   13877 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13878 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13879 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13880 
   13881 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13882 }
   13883 
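/*
 * Get the software flag used on ICH8 and newer: wait for the MDIO
 * software ownership bit to clear, then claim it.
 */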
   13884 static int
   13885 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13886 {
   13887 	uint32_t ext_ctrl;
   13888 	int timeout;
   13889 
   13890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13891 		device_xname(sc->sc_dev), __func__));
   13892 	mutex_enter(sc->sc_ich_phymtx);
   13893 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13894 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13895 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13896 			break;
   13897 		delay(1000);
   13898 	}
   13899 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13900 		printf("%s: SW has already locked the resource\n",
   13901 		    device_xname(sc->sc_dev));
   13902 		goto out;
   13903 	}
   13904 
   13905 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13906 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13907 	for (timeout = 0; timeout < 1000; timeout++) {
   13908 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13909 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13910 			break;
   13911 		delay(1000);
   13912 	}
   13913 	if (timeout >= 1000) {
   13914 		printf("%s: failed to acquire semaphore\n",
   13915 		    device_xname(sc->sc_dev));
   13916 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13917 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13918 		goto out;
   13919 	}
   13920 	return 0;
   13921 
   13922 out:
   13923 	mutex_exit(sc->sc_ich_phymtx);
   13924 	return 1;
   13925 }
   13926 
   13927 static void
   13928 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13929 {
   13930 	uint32_t ext_ctrl;
   13931 
   13932 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13933 		device_xname(sc->sc_dev), __func__));
   13934 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13935 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13936 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13937 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13938 	} else {
   13939 		printf("%s: Semaphore unexpectedly released\n",
   13940 		    device_xname(sc->sc_dev));
   13941 	}
   13942 
   13943 	mutex_exit(sc->sc_ich_phymtx);
   13944 }
   13945 
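/* On ICH8 and newer, NVM access is serialized by a simple mutex */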
   13946 static int
   13947 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13948 {
   13949 
   13950 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13951 		device_xname(sc->sc_dev), __func__));
   13952 	mutex_enter(sc->sc_ich_nvmmtx);
   13953 
   13954 	return 0;
   13955 }
   13956 
   13957 static void
   13958 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13959 {
   13960 
   13961 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13962 		device_xname(sc->sc_dev), __func__));
   13963 	mutex_exit(sc->sc_ich_nvmmtx);
   13964 }
   13965 
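/*
 * Get the 82573 hardware semaphore by setting the MDIO software
 * ownership bit in EXTCNFCTR and polling until the bit sticks.
 */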
   13966 static int
   13967 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13968 {
   13969 	int i = 0;
   13970 	uint32_t reg;
   13971 
   13972 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13973 		device_xname(sc->sc_dev), __func__));
   13974 
   13975 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13976 	do {
   13977 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13978 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13979 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13980 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13981 			break;
   13982 		delay(2*1000);
   13983 		i++;
   13984 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13985 
   13986 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13987 		wm_put_hw_semaphore_82573(sc);
   13988 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13989 		    device_xname(sc->sc_dev));
   13990 		return -1;
   13991 	}
   13992 
   13993 	return 0;
   13994 }
   13995 
   13996 static void
   13997 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13998 {
   13999 	uint32_t reg;
   14000 
   14001 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14002 		device_xname(sc->sc_dev), __func__));
   14003 
   14004 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14005 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14006 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14007 }
   14008 
   14009 /*
   14010  * Management mode and power management related subroutines.
   14011  * BMC, AMT, suspend/resume and EEE.
   14012  */
   14013 
   14014 #ifdef WM_WOL
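/* Check whether the firmware is running in one of the management modes */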
   14015 static int
   14016 wm_check_mng_mode(struct wm_softc *sc)
   14017 {
   14018 	int rv;
   14019 
   14020 	switch (sc->sc_type) {
   14021 	case WM_T_ICH8:
   14022 	case WM_T_ICH9:
   14023 	case WM_T_ICH10:
   14024 	case WM_T_PCH:
   14025 	case WM_T_PCH2:
   14026 	case WM_T_PCH_LPT:
   14027 	case WM_T_PCH_SPT:
   14028 	case WM_T_PCH_CNP:
   14029 		rv = wm_check_mng_mode_ich8lan(sc);
   14030 		break;
   14031 	case WM_T_82574:
   14032 	case WM_T_82583:
   14033 		rv = wm_check_mng_mode_82574(sc);
   14034 		break;
   14035 	case WM_T_82571:
   14036 	case WM_T_82572:
   14037 	case WM_T_82573:
   14038 	case WM_T_80003:
   14039 		rv = wm_check_mng_mode_generic(sc);
   14040 		break;
   14041 	default:
		/* nothing to do */
   14043 		rv = 0;
   14044 		break;
   14045 	}
   14046 
   14047 	return rv;
   14048 }
   14049 
   14050 static int
   14051 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14052 {
   14053 	uint32_t fwsm;
   14054 
   14055 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14056 
   14057 	if (((fwsm & FWSM_FW_VALID) != 0)
   14058 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14059 		return 1;
   14060 
   14061 	return 0;
   14062 }
   14063 
   14064 static int
   14065 wm_check_mng_mode_82574(struct wm_softc *sc)
   14066 {
   14067 	uint16_t data;
   14068 
   14069 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14070 
   14071 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14072 		return 1;
   14073 
   14074 	return 0;
   14075 }
   14076 
   14077 static int
   14078 wm_check_mng_mode_generic(struct wm_softc *sc)
   14079 {
   14080 	uint32_t fwsm;
   14081 
   14082 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14083 
   14084 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14085 		return 1;
   14086 
   14087 	return 0;
   14088 }
   14089 #endif /* WM_WOL */
   14090 
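/*
 * Return 1 if management packets must be passed through to the host:
 * the firmware must be present with TCO receive enabled and not be
 * operating in ASF mode.
 */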
   14091 static int
   14092 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14093 {
   14094 	uint32_t manc, fwsm, factps;
   14095 
   14096 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14097 		return 0;
   14098 
   14099 	manc = CSR_READ(sc, WMREG_MANC);
   14100 
   14101 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14102 		device_xname(sc->sc_dev), manc));
   14103 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14104 		return 0;
   14105 
   14106 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14107 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14108 		factps = CSR_READ(sc, WMREG_FACTPS);
   14109 		if (((factps & FACTPS_MNGCG) == 0)
   14110 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14111 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14113 		uint16_t data;
   14114 
   14115 		factps = CSR_READ(sc, WMREG_FACTPS);
   14116 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14117 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14118 			device_xname(sc->sc_dev), factps, data));
   14119 		if (((factps & FACTPS_MNGCG) == 0)
   14120 		    && ((data & NVM_CFG2_MNGM_MASK)
   14121 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14122 			return 1;
   14123 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14124 	    && ((manc & MANC_ASF_EN) == 0))
   14125 		return 1;
   14126 
   14127 	return 0;
   14128 }
   14129 
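/* Return true while the firmware is blocking PHY resets */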
   14130 static bool
   14131 wm_phy_resetisblocked(struct wm_softc *sc)
   14132 {
   14133 	bool blocked = false;
   14134 	uint32_t reg;
   14135 	int i = 0;
   14136 
   14137 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14138 		device_xname(sc->sc_dev), __func__));
   14139 
   14140 	switch (sc->sc_type) {
   14141 	case WM_T_ICH8:
   14142 	case WM_T_ICH9:
   14143 	case WM_T_ICH10:
   14144 	case WM_T_PCH:
   14145 	case WM_T_PCH2:
   14146 	case WM_T_PCH_LPT:
   14147 	case WM_T_PCH_SPT:
   14148 	case WM_T_PCH_CNP:
   14149 		do {
   14150 			reg = CSR_READ(sc, WMREG_FWSM);
   14151 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14152 				blocked = true;
   14153 				delay(10*1000);
   14154 				continue;
   14155 			}
   14156 			blocked = false;
   14157 		} while (blocked && (i++ < 30));
   14158 		return blocked;
   14159 		break;
   14160 	case WM_T_82571:
   14161 	case WM_T_82572:
   14162 	case WM_T_82573:
   14163 	case WM_T_82574:
   14164 	case WM_T_82583:
   14165 	case WM_T_80003:
   14166 		reg = CSR_READ(sc, WMREG_MANC);
   14167 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14168 			return true;
   14169 		else
   14170 			return false;
   14171 		break;
   14172 	default:
   14173 		/* no problem */
   14174 		break;
   14175 	}
   14176 
   14177 	return false;
   14178 }
   14179 
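/* Tell the firmware that the driver has taken over the device */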
   14180 static void
   14181 wm_get_hw_control(struct wm_softc *sc)
   14182 {
   14183 	uint32_t reg;
   14184 
   14185 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14186 		device_xname(sc->sc_dev), __func__));
   14187 
   14188 	if (sc->sc_type == WM_T_82573) {
   14189 		reg = CSR_READ(sc, WMREG_SWSM);
   14190 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14191 	} else if (sc->sc_type >= WM_T_82571) {
   14192 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14193 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14194 	}
   14195 }
   14196 
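/* Tell the firmware that the driver has released the device */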
   14197 static void
   14198 wm_release_hw_control(struct wm_softc *sc)
   14199 {
   14200 	uint32_t reg;
   14201 
   14202 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14203 		device_xname(sc->sc_dev), __func__));
   14204 
   14205 	if (sc->sc_type == WM_T_82573) {
   14206 		reg = CSR_READ(sc, WMREG_SWSM);
   14207 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14208 	} else if (sc->sc_type >= WM_T_82571) {
   14209 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14210 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14211 	}
   14212 }
   14213 
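/* Gate or ungate the automatic PHY configuration done by the hardware */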
   14214 static void
   14215 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14216 {
   14217 	uint32_t reg;
   14218 
   14219 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14220 		device_xname(sc->sc_dev), __func__));
   14221 
   14222 	if (sc->sc_type < WM_T_PCH2)
   14223 		return;
   14224 
   14225 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14226 
   14227 	if (gate)
   14228 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14229 	else
   14230 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14231 
   14232 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14233 }
   14234 
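/*
 * Make the PHY accessible on PCH and newer devices: disable ULP and,
 * if the PHY does not respond, force SMBus mode and/or toggle the
 * LANPHYPC value bit before resetting the PHY.
 */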
   14235 static int
   14236 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14237 {
   14238 	uint32_t fwsm, reg;
   14239 	int rv = 0;
   14240 
   14241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14242 		device_xname(sc->sc_dev), __func__));
   14243 
   14244 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14245 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14246 
   14247 	/* Disable ULP */
   14248 	wm_ulp_disable(sc);
   14249 
   14250 	/* Acquire PHY semaphore */
   14251 	rv = sc->phy.acquire(sc);
   14252 	if (rv != 0) {
   14253 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14254 		device_xname(sc->sc_dev), __func__));
   14255 		return -1;
   14256 	}
   14257 
   14258 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14259 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14260 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14261 	 */
   14262 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14263 	switch (sc->sc_type) {
   14264 	case WM_T_PCH_LPT:
   14265 	case WM_T_PCH_SPT:
   14266 	case WM_T_PCH_CNP:
   14267 		if (wm_phy_is_accessible_pchlan(sc))
   14268 			break;
   14269 
   14270 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14271 		 * forcing MAC to SMBus mode first.
   14272 		 */
   14273 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14274 		reg |= CTRL_EXT_FORCE_SMBUS;
   14275 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14276 #if 0
   14277 		/* XXX Isn't this required??? */
   14278 		CSR_WRITE_FLUSH(sc);
   14279 #endif
   14280 		/* Wait 50 milliseconds for MAC to finish any retries
   14281 		 * that it might be trying to perform from previous
   14282 		 * attempts to acknowledge any phy read requests.
   14283 		 */
   14284 		delay(50 * 1000);
   14285 		/* FALLTHROUGH */
   14286 	case WM_T_PCH2:
   14287 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14288 			break;
   14289 		/* FALLTHROUGH */
   14290 	case WM_T_PCH:
   14291 		if (sc->sc_type == WM_T_PCH)
   14292 			if ((fwsm & FWSM_FW_VALID) != 0)
   14293 				break;
   14294 
   14295 		if (wm_phy_resetisblocked(sc) == true) {
   14296 			printf("XXX reset is blocked(3)\n");
   14297 			break;
   14298 		}
   14299 
   14300 		/* Toggle LANPHYPC Value bit */
   14301 		wm_toggle_lanphypc_pch_lpt(sc);
   14302 
   14303 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14304 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14305 				break;
   14306 
   14307 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14308 			 * so ensure that the MAC is also out of SMBus mode
   14309 			 */
   14310 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14311 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14312 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14313 
   14314 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14315 				break;
   14316 			rv = -1;
   14317 		}
   14318 		break;
   14319 	default:
   14320 		break;
   14321 	}
   14322 
   14323 	/* Release semaphore */
   14324 	sc->phy.release(sc);
   14325 
   14326 	if (rv == 0) {
   14327 		/* Check to see if able to reset PHY.  Print error if not */
   14328 		if (wm_phy_resetisblocked(sc)) {
   14329 			printf("XXX reset is blocked(4)\n");
   14330 			goto out;
   14331 		}
   14332 
   14333 		/* Reset the PHY before any access to it.  Doing so, ensures
   14334 		 * that the PHY is in a known good state before we read/write
   14335 		 * PHY registers.  The generic reset is sufficient here,
   14336 		 * because we haven't determined the PHY type yet.
   14337 		 */
   14338 		if (wm_reset_phy(sc) != 0)
   14339 			goto out;
   14340 
   14341 		/* On a successful reset, possibly need to wait for the PHY
   14342 		 * to quiesce to an accessible state before returning control
   14343 		 * to the calling function.  If the PHY does not quiesce, then
		 * report that the reset is blocked, as this is the
		 * condition the PHY is in.
   14346 		 */
   14347 		if (wm_phy_resetisblocked(sc))
   14348 			printf("XXX reset is blocked(4)\n");
   14349 	}
   14350 
   14351 out:
   14352 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14353 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14354 		delay(10*1000);
   14355 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14356 	}
   14357 
	return rv;
   14359 }
   14360 
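/*
 * Stop intercepting ARP in hardware and enable receiving management
 * packets to the host.
 */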
   14361 static void
   14362 wm_init_manageability(struct wm_softc *sc)
   14363 {
   14364 
   14365 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14368 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14369 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14370 
   14371 		/* Disable hardware interception of ARP */
   14372 		manc &= ~MANC_ARP_EN;
   14373 
   14374 		/* Enable receiving management packets to the host */
   14375 		if (sc->sc_type >= WM_T_82571) {
   14376 			manc |= MANC_EN_MNG2HOST;
   14377 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14378 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14379 		}
   14380 
   14381 		CSR_WRITE(sc, WMREG_MANC, manc);
   14382 	}
   14383 }
   14384 
   14385 static void
   14386 wm_release_manageability(struct wm_softc *sc)
   14387 {
   14388 
   14389 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14390 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14391 
   14392 		manc |= MANC_ARP_EN;
   14393 		if (sc->sc_type >= WM_T_82571)
   14394 			manc &= ~MANC_EN_MNG2HOST;
   14395 
   14396 		CSR_WRITE(sc, WMREG_MANC, manc);
   14397 	}
   14398 }
   14399 
   14400 static void
   14401 wm_get_wakeup(struct wm_softc *sc)
   14402 {
   14403 
   14404 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14405 	switch (sc->sc_type) {
   14406 	case WM_T_82573:
   14407 	case WM_T_82583:
   14408 		sc->sc_flags |= WM_F_HAS_AMT;
   14409 		/* FALLTHROUGH */
   14410 	case WM_T_80003:
   14411 	case WM_T_82575:
   14412 	case WM_T_82576:
   14413 	case WM_T_82580:
   14414 	case WM_T_I350:
   14415 	case WM_T_I354:
   14416 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14417 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14418 		/* FALLTHROUGH */
   14419 	case WM_T_82541:
   14420 	case WM_T_82541_2:
   14421 	case WM_T_82547:
   14422 	case WM_T_82547_2:
   14423 	case WM_T_82571:
   14424 	case WM_T_82572:
   14425 	case WM_T_82574:
   14426 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14427 		break;
   14428 	case WM_T_ICH8:
   14429 	case WM_T_ICH9:
   14430 	case WM_T_ICH10:
   14431 	case WM_T_PCH:
   14432 	case WM_T_PCH2:
   14433 	case WM_T_PCH_LPT:
   14434 	case WM_T_PCH_SPT:
   14435 	case WM_T_PCH_CNP:
   14436 		sc->sc_flags |= WM_F_HAS_AMT;
   14437 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14438 		break;
   14439 	default:
   14440 		break;
   14441 	}
   14442 
   14443 	/* 1: HAS_MANAGE */
   14444 	if (wm_enable_mng_pass_thru(sc) != 0)
   14445 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14446 
   14447 	/*
	 * Note that the WOL flags are set after the EEPROM stuff has been
	 * reset.
   14450 	 */
   14451 }
   14452 
   14453 /*
   14454  * Unconfigure Ultra Low Power mode.
   14455  * Only for I217 and newer (see below).
   14456  */
   14457 static int
   14458 wm_ulp_disable(struct wm_softc *sc)
   14459 {
   14460 	uint32_t reg;
   14461 	uint16_t phyreg;
   14462 	int i = 0, rv = 0;
   14463 
   14464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14465 		device_xname(sc->sc_dev), __func__));
   14466 	/* Exclude old devices */
   14467 	if ((sc->sc_type < WM_T_PCH_LPT)
   14468 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14469 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14470 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14471 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14472 		return 0;
   14473 
   14474 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14475 		/* Request ME un-configure ULP mode in the PHY */
   14476 		reg = CSR_READ(sc, WMREG_H2ME);
   14477 		reg &= ~H2ME_ULP;
   14478 		reg |= H2ME_ENFORCE_SETTINGS;
   14479 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14480 
   14481 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14482 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14483 			if (i++ == 30) {
   14484 				printf("%s timed out\n", __func__);
   14485 				return -1;
   14486 			}
   14487 			delay(10 * 1000);
   14488 		}
   14489 		reg = CSR_READ(sc, WMREG_H2ME);
   14490 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14491 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14492 
   14493 		return 0;
   14494 	}
   14495 
   14496 	/* Acquire semaphore */
   14497 	rv = sc->phy.acquire(sc);
   14498 	if (rv != 0) {
   14499 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14500 		device_xname(sc->sc_dev), __func__));
   14501 		return -1;
   14502 	}
   14503 
   14504 	/* Toggle LANPHYPC */
   14505 	wm_toggle_lanphypc_pch_lpt(sc);
   14506 
   14507 	/* Unforce SMBus mode in PHY */
   14508 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14509 	if (rv != 0) {
   14510 		uint32_t reg2;
   14511 
   14512 		printf("%s: Force SMBus first.\n", __func__);
   14513 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14514 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14515 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14516 		delay(50 * 1000);
   14517 
   14518 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14519 		    &phyreg);
   14520 		if (rv != 0)
   14521 			goto release;
   14522 	}
   14523 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14524 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14525 
   14526 	/* Unforce SMBus mode in MAC */
   14527 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14528 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14529 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14530 
   14531 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14532 	if (rv != 0)
   14533 		goto release;
   14534 	phyreg |= HV_PM_CTRL_K1_ENA;
   14535 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14536 
   14537 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14538 		&phyreg);
   14539 	if (rv != 0)
   14540 		goto release;
   14541 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14542 	    | I218_ULP_CONFIG1_STICKY_ULP
   14543 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14544 	    | I218_ULP_CONFIG1_WOL_HOST
   14545 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14546 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14547 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14548 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14549 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14550 	phyreg |= I218_ULP_CONFIG1_START;
   14551 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14552 
   14553 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14554 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14555 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14556 
   14557 release:
   14558 	/* Release semaphore */
   14559 	sc->phy.release(sc);
   14560 	wm_gmii_reset(sc);
   14561 	delay(50 * 1000);
   14562 
   14563 	return rv;
   14564 }
   14565 
   14566 /* WOL in the newer chipset interfaces (pchlan) */
   14567 static int
   14568 wm_enable_phy_wakeup(struct wm_softc *sc)
   14569 {
   14570 	device_t dev = sc->sc_dev;
   14571 	uint32_t mreg, moff;
   14572 	uint16_t wuce, wuc, wufc, preg;
   14573 	int i, rv;
   14574 
   14575 	KASSERT(sc->sc_type >= WM_T_PCH);
   14576 
   14577 	/* Copy MAC RARs to PHY RARs */
   14578 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14579 
   14580 	/* Activate PHY wakeup */
   14581 	rv = sc->phy.acquire(sc);
   14582 	if (rv != 0) {
   14583 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14584 		    __func__);
   14585 		return rv;
   14586 	}
   14587 
   14588 	/*
   14589 	 * Enable access to PHY wakeup registers.
   14590 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14591 	 */
   14592 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14593 	if (rv != 0) {
   14594 		device_printf(dev,
   14595 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14596 		goto release;
   14597 	}
   14598 
   14599 	/* Copy MAC MTA to PHY MTA */
   14600 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14601 		uint16_t lo, hi;
   14602 
   14603 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14604 		lo = (uint16_t)(mreg & 0xffff);
   14605 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14606 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14607 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14608 	}
   14609 
   14610 	/* Configure PHY Rx Control register */
   14611 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14612 	mreg = CSR_READ(sc, WMREG_RCTL);
   14613 	if (mreg & RCTL_UPE)
   14614 		preg |= BM_RCTL_UPE;
   14615 	if (mreg & RCTL_MPE)
   14616 		preg |= BM_RCTL_MPE;
   14617 	preg &= ~(BM_RCTL_MO_MASK);
   14618 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14619 	if (moff != 0)
   14620 		preg |= moff << BM_RCTL_MO_SHIFT;
   14621 	if (mreg & RCTL_BAM)
   14622 		preg |= BM_RCTL_BAM;
   14623 	if (mreg & RCTL_PMCF)
   14624 		preg |= BM_RCTL_PMCF;
   14625 	mreg = CSR_READ(sc, WMREG_CTRL);
   14626 	if (mreg & CTRL_RFCE)
   14627 		preg |= BM_RCTL_RFCE;
   14628 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14629 
   14630 	wuc = WUC_APME | WUC_PME_EN;
   14631 	wufc = WUFC_MAG;
   14632 	/* Enable PHY wakeup in MAC register */
   14633 	CSR_WRITE(sc, WMREG_WUC,
   14634 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14635 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14636 
   14637 	/* Configure and enable PHY wakeup in PHY registers */
   14638 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14639 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14640 
   14641 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14642 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14643 
   14644 release:
   14645 	sc->phy.release(sc);
   14646 
    14647 	return rv;
   14648 }
   14649 
   14650 /* Power down workaround on D3 */
   14651 static void
   14652 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14653 {
   14654 	uint32_t reg;
   14655 	uint16_t phyreg;
   14656 	int i;
   14657 
   14658 	for (i = 0; i < 2; i++) {
   14659 		/* Disable link */
   14660 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14661 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14662 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14663 
   14664 		/*
   14665 		 * Call gig speed drop workaround on Gig disable before
   14666 		 * accessing any PHY registers
   14667 		 */
   14668 		if (sc->sc_type == WM_T_ICH8)
   14669 			wm_gig_downshift_workaround_ich8lan(sc);
   14670 
   14671 		/* Write VR power-down enable */
   14672 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14673 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14674 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14675 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14676 
   14677 		/* Read it back and test */
   14678 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14679 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14680 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14681 			break;
   14682 
   14683 		/* Issue PHY reset and repeat at most one more time */
   14684 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14685 	}
   14686 }
   14687 
   14688 /*
   14689  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14690  *  @sc: pointer to the HW structure
   14691  *
   14692  *  During S0 to Sx transition, it is possible the link remains at gig
   14693  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14694  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14695  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14696  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14697  *  needs to be written.
    14698  *  Parts that support (and are linked to a partner which supports) EEE in
   14699  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14700  *  than 10Mbps w/o EEE.
   14701  */
   14702 static void
   14703 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14704 {
   14705 	uint32_t phy_ctrl;
   14706 
   14707 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14708 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14709 
   14710 	if (sc->sc_phytype == WMPHY_I217) {
   14711 		uint16_t devid = sc->sc_pcidevid;
   14712 
   14713 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14714 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14715 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14716 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14717 		    (sc->sc_type >= WM_T_PCH_SPT))
   14718 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14719 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14720 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14721 
   14722 #if 0 /* notyet */
   14723 		if (sc->phy.acquire(sc) != 0)
   14724 			goto out;
   14725 
   14726 		/* XXX Do workaround for EEE */
   14727 
   14728 		/*
   14729 		 * For i217 Intel Rapid Start Technology support,
   14730 		 * when the system is going into Sx and no manageability engine
   14731 		 * is present, the driver must configure proxy to reset only on
   14732 		 * power good.	LPI (Low Power Idle) state must also reset only
   14733 		 * on power good, as well as the MTA (Multicast table array).
   14734 		 * The SMBus release must also be disabled on LCD reset.
   14735 		 */
   14736 
   14737 		/*
   14738 		 * Enable MTA to reset for Intel Rapid Start Technology
   14739 		 * Support
   14740 		 */
   14741 
   14742 		sc->phy.release(sc);
   14743 #endif
   14744 	}
   14745 #if 0
   14746 out:
   14747 #endif
   14748 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14749 
   14750 	if (sc->sc_type == WM_T_ICH8)
   14751 		wm_gig_downshift_workaround_ich8lan(sc);
   14752 
   14753 	if (sc->sc_type >= WM_T_PCH) {
   14754 		wm_oem_bits_config_ich8lan(sc, false);
   14755 
   14756 		/* Reset PHY to activate OEM bits on 82577/8 */
   14757 		if (sc->sc_type == WM_T_PCH)
   14758 			wm_reset_phy(sc);
   14759 
   14760 		if (sc->phy.acquire(sc) != 0)
   14761 			return;
   14762 		wm_write_smbus_addr(sc);
   14763 		sc->phy.release(sc);
   14764 	}
   14765 }
   14766 
   14767 /*
   14768  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14769  *  @sc: pointer to the HW structure
   14770  *
   14771  *  During Sx to S0 transitions on non-managed devices or managed devices
   14772  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14773  *  accessed properly by the s/w, toggle the LANPHYPC value to power-cycle
   14774  *  the PHY.
   14775  *  On i217, setup Intel Rapid Start Technology.
   14776  */
   14777 static int
   14778 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14779 {
   14780 	device_t dev = sc->sc_dev;
   14781 	int rv;
   14782 
   14783 	if (sc->sc_type < WM_T_PCH2)
   14784 		return 0;
   14785 
   14786 	rv = wm_init_phy_workarounds_pchlan(sc);
   14787 	if (rv != 0)
   14788 		return -1;
   14789 
   14790 	/* For i217 Intel Rapid Start Technology support when the system
   14791 	 * is transitioning from Sx and no manageability engine is present
   14792 	 * configure SMBus to restore on reset, disable proxy, and enable
   14793 	 * the reset on MTA (Multicast table array).
   14794 	 */
   14795 	if (sc->sc_phytype == WMPHY_I217) {
   14796 		uint16_t phy_reg;
   14797 
   14798 		if (sc->phy.acquire(sc) != 0)
   14799 			return -1;
   14800 
   14801 		/* Clear Auto Enable LPI after link up */
   14802 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14803 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14804 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14805 
   14806 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14807 			/* Restore clear on SMB if no manageability engine
   14808 			 * is present
   14809 			 */
   14810 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14811 			    &phy_reg);
   14812 			if (rv != 0)
   14813 				goto release;
   14814 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14815 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14816 
   14817 			/* Disable Proxy */
   14818 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14819 		}
   14820 		/* Enable reset on MTA */
    14821 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14822 		if (rv != 0)
   14823 			goto release;
   14824 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14825 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14826 
   14827 release:
   14828 		sc->phy.release(sc);
   14829 		return rv;
   14830 	}
   14831 
   14832 	return 0;
   14833 }
   14834 
   14835 static void
   14836 wm_enable_wakeup(struct wm_softc *sc)
   14837 {
   14838 	uint32_t reg, pmreg;
   14839 	pcireg_t pmode;
   14840 	int rv = 0;
   14841 
   14842 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14843 		device_xname(sc->sc_dev), __func__));
   14844 
   14845 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14846 	    &pmreg, NULL) == 0)
   14847 		return;
   14848 
   14849 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14850 		goto pme;
   14851 
   14852 	/* Advertise the wakeup capability */
   14853 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14854 	    | CTRL_SWDPIN(3));
   14855 
   14856 	/* Keep the laser running on fiber adapters */
   14857 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14858 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14859 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14860 		reg |= CTRL_EXT_SWDPIN(3);
   14861 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14862 	}
   14863 
   14864 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14865 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   14866 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   14867 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   14868 		wm_suspend_workarounds_ich8lan(sc);
   14869 
   14870 #if 0	/* for the multicast packet */
   14871 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14872 	reg |= WUFC_MC;
   14873 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14874 #endif
   14875 
   14876 	if (sc->sc_type >= WM_T_PCH) {
   14877 		rv = wm_enable_phy_wakeup(sc);
   14878 		if (rv != 0)
   14879 			goto pme;
   14880 	} else {
   14881 		/* Enable wakeup by the MAC */
   14882 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   14883 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   14884 	}
   14885 
   14886 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14887 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14888 		|| (sc->sc_type == WM_T_PCH2))
   14889 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14890 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14891 
   14892 pme:
   14893 	/* Request PME */
   14894 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14895 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   14896 		/* For WOL */
   14897 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14898 	} else {
   14899 		/* Disable WOL */
   14900 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14901 	}
   14902 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14903 }
   14904 
    14905 /* Disable ASPM L0s and/or L1 as a workaround */
   14906 static void
   14907 wm_disable_aspm(struct wm_softc *sc)
   14908 {
   14909 	pcireg_t reg, mask = 0;
    14910 	const char *str = "";
   14911 
   14912 	/*
    14913 	 * Only for PCIe devices which have the PCIe capability in their
    14914 	 * PCI config space.
   14915 	 */
   14916 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14917 		return;
   14918 
   14919 	switch (sc->sc_type) {
   14920 	case WM_T_82571:
   14921 	case WM_T_82572:
   14922 		/*
   14923 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14924 		 * State Power management L1 State (ASPM L1).
   14925 		 */
   14926 		mask = PCIE_LCSR_ASPM_L1;
   14927 		str = "L1 is";
   14928 		break;
   14929 	case WM_T_82573:
   14930 	case WM_T_82574:
   14931 	case WM_T_82583:
   14932 		/*
   14933 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14934 		 *
    14935 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14936 		 * some chipsets.  The documents for the 82574 and 82583 say
    14937 		 * that disabling L0s with those specific chipsets is
    14938 		 * sufficient, but we follow what the Intel em driver does.
   14939 		 *
   14940 		 * References:
   14941 		 * Errata 8 of the Specification Update of i82573.
   14942 		 * Errata 20 of the Specification Update of i82574.
   14943 		 * Errata 9 of the Specification Update of i82583.
   14944 		 */
   14945 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14946 		str = "L0s and L1 are";
   14947 		break;
   14948 	default:
   14949 		return;
   14950 	}
   14951 
   14952 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14953 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14954 	reg &= ~mask;
   14955 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14956 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14957 
   14958 	/* Print only in wm_attach() */
   14959 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14960 		aprint_verbose_dev(sc->sc_dev,
    14961 		    "ASPM %s disabled to work around the errata.\n", str);
   14962 }
   14963 
   14964 /* LPLU */
   14965 
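          /*
           * LPLU (Low Power Link Up) trades link speed for power savings.
           * The register controlling it differs per family (IGP PHY power
           * management register, PHPM, PHY_CTRL or the HV OEM bits
           * register); wm_lplu_d0_disable() clears the D0 (operational
           * state) LPLU setting so full-speed negotiation is used while
           * the device is up.
           */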
   14966 static void
   14967 wm_lplu_d0_disable(struct wm_softc *sc)
   14968 {
   14969 	struct mii_data *mii = &sc->sc_mii;
   14970 	uint32_t reg;
   14971 	uint16_t phyval;
   14972 
   14973 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14974 		device_xname(sc->sc_dev), __func__));
   14975 
   14976 	if (sc->sc_phytype == WMPHY_IFE)
   14977 		return;
   14978 
   14979 	switch (sc->sc_type) {
   14980 	case WM_T_82571:
   14981 	case WM_T_82572:
   14982 	case WM_T_82573:
   14983 	case WM_T_82575:
   14984 	case WM_T_82576:
   14985 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   14986 		phyval &= ~PMR_D0_LPLU;
   14987 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   14988 		break;
   14989 	case WM_T_82580:
   14990 	case WM_T_I350:
   14991 	case WM_T_I210:
   14992 	case WM_T_I211:
   14993 		reg = CSR_READ(sc, WMREG_PHPM);
   14994 		reg &= ~PHPM_D0A_LPLU;
   14995 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14996 		break;
   14997 	case WM_T_82574:
   14998 	case WM_T_82583:
   14999 	case WM_T_ICH8:
   15000 	case WM_T_ICH9:
   15001 	case WM_T_ICH10:
   15002 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15003 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15004 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15005 		CSR_WRITE_FLUSH(sc);
   15006 		break;
   15007 	case WM_T_PCH:
   15008 	case WM_T_PCH2:
   15009 	case WM_T_PCH_LPT:
   15010 	case WM_T_PCH_SPT:
   15011 	case WM_T_PCH_CNP:
   15012 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15013 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15014 		if (wm_phy_resetisblocked(sc) == false)
   15015 			phyval |= HV_OEM_BITS_ANEGNOW;
   15016 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15017 		break;
   15018 	default:
   15019 		break;
   15020 	}
   15021 }
   15022 
   15023 /* EEE */
   15024 
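          /*
           * EEE (IEEE 802.3az Energy Efficient Ethernet) lets both link
           * partners enter a Low Power Idle (LPI) state between packets.
           * On the I350/I210 family it is configured via the IPCNFG and
           * EEER MAC registers; on PCH2 and newer it is configured via
           * EMI registers in the PHY.  wm_set_eee() below dispatches on
           * the chip type.
           */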
   15025 static int
   15026 wm_set_eee_i350(struct wm_softc *sc)
   15027 {
   15028 	struct ethercom *ec = &sc->sc_ethercom;
   15029 	uint32_t ipcnfg, eeer;
   15030 	uint32_t ipcnfg_mask
   15031 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15032 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15033 
   15034 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15035 	eeer = CSR_READ(sc, WMREG_EEER);
   15036 
   15037 	/* enable or disable per user setting */
   15038 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15039 		ipcnfg |= ipcnfg_mask;
   15040 		eeer |= eeer_mask;
   15041 	} else {
   15042 		ipcnfg &= ~ipcnfg_mask;
   15043 		eeer &= ~eeer_mask;
   15044 	}
   15045 
   15046 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15047 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15048 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15049 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15050 
   15051 	return 0;
   15052 }
   15053 
   15054 static int
   15055 wm_set_eee_pchlan(struct wm_softc *sc)
   15056 {
   15057 	device_t dev = sc->sc_dev;
   15058 	struct ethercom *ec = &sc->sc_ethercom;
   15059 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15060 	int rv = 0;
   15061 
   15062 	switch (sc->sc_phytype) {
   15063 	case WMPHY_82579:
   15064 		lpa = I82579_EEE_LP_ABILITY;
   15065 		pcs_status = I82579_EEE_PCS_STATUS;
   15066 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15067 		break;
   15068 	case WMPHY_I217:
   15069 		lpa = I217_EEE_LP_ABILITY;
   15070 		pcs_status = I217_EEE_PCS_STATUS;
   15071 		adv_addr = I217_EEE_ADVERTISEMENT;
   15072 		break;
   15073 	default:
   15074 		return 0;
   15075 	}
   15076 
   15077 	if (sc->phy.acquire(sc)) {
   15078 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15079 		return 0;
   15080 	}
   15081 
   15082 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15083 	if (rv != 0)
   15084 		goto release;
   15085 
   15086 	/* Clear bits that enable EEE in various speeds */
   15087 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15088 
   15089 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15090 		/* Save off link partner's EEE ability */
   15091 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15092 		if (rv != 0)
   15093 			goto release;
   15094 
   15095 		/* Read EEE advertisement */
   15096 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15097 			goto release;
   15098 
   15099 		/*
   15100 		 * Enable EEE only for speeds in which the link partner is
   15101 		 * EEE capable and for which we advertise EEE.
   15102 		 */
   15103 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15104 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15105 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15106 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15107 			if ((data & ANLPAR_TX_FD) != 0)
   15108 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15109 			else {
   15110 				/*
   15111 				 * EEE is not supported in 100Half, so ignore
   15112 				 * partner's EEE in 100 ability if full-duplex
   15113 				 * is not advertised.
   15114 				 */
   15115 				sc->eee_lp_ability
   15116 				    &= ~AN_EEEADVERT_100_TX;
   15117 			}
   15118 		}
   15119 	}
   15120 
   15121 	if (sc->sc_phytype == WMPHY_82579) {
   15122 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15123 		if (rv != 0)
   15124 			goto release;
   15125 
   15126 		data &= ~I82579_LPI_PLL_SHUT_100;
   15127 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15128 	}
   15129 
   15130 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15131 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15132 		goto release;
   15133 
   15134 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15135 release:
   15136 	sc->phy.release(sc);
   15137 
   15138 	return rv;
   15139 }
   15140 
   15141 static int
   15142 wm_set_eee(struct wm_softc *sc)
   15143 {
   15144 	struct ethercom *ec = &sc->sc_ethercom;
   15145 
   15146 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15147 		return 0;
   15148 
   15149 	if (sc->sc_type == WM_T_I354) {
   15150 		/* I354 uses an external PHY */
   15151 		return 0; /* not yet */
   15152 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15153 		return wm_set_eee_i350(sc);
   15154 	else if (sc->sc_type >= WM_T_PCH2)
   15155 		return wm_set_eee_pchlan(sc);
   15156 
   15157 	return 0;
   15158 }
   15159 
   15160 /*
   15161  * Workarounds (mainly PHY related).
   15162  * Basically, PHY's workarounds are in the PHY drivers.
   15163  */
   15164 
   15165 /* Work-around for 82566 Kumeran PCS lock loss */
   15166 static int
   15167 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15168 {
   15169 	struct mii_data *mii = &sc->sc_mii;
   15170 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15171 	int i, reg, rv;
   15172 	uint16_t phyreg;
   15173 
   15174 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15175 		device_xname(sc->sc_dev), __func__));
   15176 
   15177 	/* If the link is not up, do nothing */
   15178 	if ((status & STATUS_LU) == 0)
   15179 		return 0;
   15180 
   15181 	/* Nothing to do if the link is other than 1Gbps */
   15182 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15183 		return 0;
   15184 
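          	/*
          	 * Poll for PCS lock loss.  The diagnostic register is read
          	 * twice per iteration (apparently because the first read can
          	 * be stale); if the lock is still lost after up to 10 PHY
          	 * resets, fall through and disable GigE negotiation entirely.
          	 */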
   15185 	for (i = 0; i < 10; i++) {
   15186 		/* read twice */
   15187 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15188 		if (rv != 0)
   15189 			return rv;
   15190 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15191 		if (rv != 0)
   15192 			return rv;
   15193 
   15194 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15195 			goto out;	/* GOOD! */
   15196 
   15197 		/* Reset the PHY */
   15198 		wm_reset_phy(sc);
   15199 		delay(5*1000);
   15200 	}
   15201 
   15202 	/* Disable GigE link negotiation */
   15203 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15204 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15205 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15206 
   15207 	/*
   15208 	 * Call gig speed drop workaround on Gig disable before accessing
   15209 	 * any PHY registers.
   15210 	 */
   15211 	wm_gig_downshift_workaround_ich8lan(sc);
   15212 
   15213 out:
   15214 	return 0;
   15215 }
   15216 
   15217 /*
   15218  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15219  *  @sc: pointer to the HW structure
   15220  *
    15221  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15222  *  LPLU, Gig disable, MDIC PHY reset):
   15223  *    1) Set Kumeran Near-end loopback
   15224  *    2) Clear Kumeran Near-end loopback
   15225  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15226  */
   15227 static void
   15228 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15229 {
   15230 	uint16_t kmreg;
   15231 
   15232 	/* Only for igp3 */
   15233 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15234 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15235 			return;
   15236 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15237 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15238 			return;
   15239 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15240 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15241 	}
   15242 }
   15243 
   15244 /*
   15245  * Workaround for pch's PHYs
   15246  * XXX should be moved to new PHY driver?
   15247  */
   15248 static int
   15249 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15250 {
   15251 	int rv;
   15252 
   15253 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15254 		device_xname(sc->sc_dev), __func__));
   15255 	KASSERT(sc->sc_type == WM_T_PCH);
   15256 
   15257 	if (sc->sc_phytype == WMPHY_82577)
   15258 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15259 			return rv;
   15260 
   15261 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   15262 
   15263 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   15264 
   15265 	/* 82578 */
   15266 	if (sc->sc_phytype == WMPHY_82578) {
   15267 		struct mii_softc *child;
   15268 
   15269 		/*
   15270 		 * Return registers to default by doing a soft reset then
   15271 		 * writing 0x3140 to the control register
   15272 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15273 		 */
   15274 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15275 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   15276 			PHY_RESET(child);
   15277 			rv = sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   15278 			    0x3140);
   15279 			if (rv != 0)
   15280 				return rv;
   15281 		}
   15282 	}
   15283 
   15284 	/* Select page 0 */
   15285 	if ((rv = sc->phy.acquire(sc)) != 0)
   15286 		return rv;
   15287 	rv = wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15288 	sc->phy.release(sc);
   15289 	if (rv != 0)
   15290 		return rv;
   15291 
   15292 	/*
    15293 	 * Configure the K1 Si workaround during PHY reset, assuming there is
    15294 	 * link, so that it disables K1 if the link is at 1Gbps.
   15295 	 */
   15296 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15297 		return rv;
   15298 
   15299 	return rv;
   15300 }
   15301 
   15302 /*
   15303  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15304  *  @sc:   pointer to the HW structure
   15305  */
   15306 static void
   15307 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15308 {
   15309 	device_t dev = sc->sc_dev;
   15310 	uint32_t mac_reg;
   15311 	uint16_t i, wuce;
   15312 	int count;
   15313 
   15314 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15315 		device_xname(sc->sc_dev), __func__));
   15316 
   15317 	if (sc->phy.acquire(sc) != 0)
   15318 		return;
   15319 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15320 		goto release;
   15321 
   15322 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15323 	count = wm_rar_count(sc);
   15324 	for (i = 0; i < count; i++) {
   15325 		uint16_t lo, hi;
   15326 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15327 		lo = (uint16_t)(mac_reg & 0xffff);
   15328 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15329 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15330 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15331 
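          		/*
          		 * The low 16 bits of RAH carry the remaining address
          		 * bits and go to BM_RAR_H; only the Address Valid bit
          		 * is copied into the PHY's BM_RAR_CTRL word.
          		 */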
   15332 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15333 		lo = (uint16_t)(mac_reg & 0xffff);
   15334 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15335 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15336 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15337 	}
   15338 
   15339 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15340 
   15341 release:
   15342 	sc->phy.release(sc);
   15343 }
   15344 
   15345 /*
   15346  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15347  *  done after every PHY reset.
   15348  */
   15349 static int
   15350 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15351 {
   15352 	int rv;
   15353 
   15354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15355 		device_xname(sc->sc_dev), __func__));
   15356 	KASSERT(sc->sc_type == WM_T_PCH2);
   15357 
   15358 	/* Set MDIO slow mode before any other MDIO access */
   15359 	rv = wm_set_mdio_slow_mode_hv(sc);
   15360 
   15361 	/* XXX set MSE higher to enable link to stay up when noise is high */
   15362 	/* XXX drop link after 5 times MSE threshold was reached */
   15363 
   15364 	return rv;
   15365 }
   15366 
   15367 /**
   15368  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15369  *  @link: link up bool flag
   15370  *
   15371  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    15372  *  preventing further DMA write requests.  Work around the issue by disabling
    15373  *  the de-assertion of the clock request when in 1Gbps mode.
   15374  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15375  *  speeds in order to avoid Tx hangs.
   15376  **/
   15377 static int
   15378 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15379 {
   15380 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15381 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15382 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15383 	uint16_t phyreg;
   15384 
   15385 	if (link && (speed == STATUS_SPEED_1000)) {
   15386 		sc->phy.acquire(sc);
   15387 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15388 		    &phyreg);
   15389 		if (rv != 0)
   15390 			goto release;
   15391 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15392 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15393 		if (rv != 0)
   15394 			goto release;
   15395 		delay(20);
   15396 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15397 
   15398 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15399 		    &phyreg);
   15400 release:
   15401 		sc->phy.release(sc);
   15402 		return rv;
   15403 	}
   15404 
   15405 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15406 
   15407 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15408 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15409 	    || !link
   15410 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15411 		goto update_fextnvm6;
   15412 
   15413 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15414 
   15415 	/* Clear link status transmit timeout */
   15416 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15417 	if (speed == STATUS_SPEED_100) {
   15418 		/* Set inband Tx timeout to 5x10us for 100Half */
   15419 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15420 
   15421 		/* Do not extend the K1 entry latency for 100Half */
   15422 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15423 	} else {
   15424 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15425 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15426 
   15427 		/* Extend the K1 entry latency for 10 Mbps */
   15428 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15429 	}
   15430 
   15431 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15432 
   15433 update_fextnvm6:
   15434 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15435 	return 0;
   15436 }
   15437 
   15438 /*
   15439  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15440  *  @sc:   pointer to the HW structure
   15441  *  @link: link up bool flag
   15442  *
   15443  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15444  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15445  *  If link is down, the function will restore the default K1 setting located
   15446  *  in the NVM.
   15447  */
   15448 static int
   15449 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15450 {
   15451 	int k1_enable = sc->sc_nvm_k1_enabled;
   15452 
   15453 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15454 		device_xname(sc->sc_dev), __func__));
   15455 
   15456 	if (sc->phy.acquire(sc) != 0)
   15457 		return -1;
   15458 
   15459 	if (link) {
   15460 		k1_enable = 0;
   15461 
   15462 		/* Link stall fix for link up */
   15463 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15464 		    0x0100);
   15465 	} else {
   15466 		/* Link stall fix for link down */
   15467 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15468 		    0x4100);
   15469 	}
   15470 
   15471 	wm_configure_k1_ich8lan(sc, k1_enable);
   15472 	sc->phy.release(sc);
   15473 
   15474 	return 0;
   15475 }
   15476 
   15477 /*
   15478  *  wm_k1_workaround_lv - K1 Si workaround
   15479  *  @sc:   pointer to the HW structure
   15480  *
   15481  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15482  *  Disable K1 for 1000 and 100 speeds
   15483  */
   15484 static int
   15485 wm_k1_workaround_lv(struct wm_softc *sc)
   15486 {
   15487 	uint32_t reg;
   15488 	uint16_t phyreg;
   15489 	int rv;
   15490 
   15491 	if (sc->sc_type != WM_T_PCH2)
   15492 		return 0;
   15493 
   15494 	/* Set K1 beacon duration based on 10Mbps speed */
   15495 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15496 	if (rv != 0)
   15497 		return rv;
   15498 
   15499 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15500 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15501 		if (phyreg &
   15502 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15503 			/* LV 1G/100 packet drop issue workaround */
   15504 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15505 			    &phyreg);
   15506 			if (rv != 0)
   15507 				return rv;
   15508 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15509 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15510 			    phyreg);
   15511 			if (rv != 0)
   15512 				return rv;
   15513 		} else {
   15514 			/* For 10Mbps */
   15515 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15516 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15517 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15518 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15519 		}
   15520 	}
   15521 
   15522 	return 0;
   15523 }
   15524 
   15525 /*
   15526  *  wm_link_stall_workaround_hv - Si workaround
   15527  *  @sc: pointer to the HW structure
   15528  *
   15529  *  This function works around a Si bug where the link partner can get
   15530  *  a link up indication before the PHY does. If small packets are sent
   15531  *  by the link partner they can be placed in the packet buffer without
    15532  *  being properly accounted for by the PHY and will stall, preventing
   15533  *  further packets from being received.  The workaround is to clear the
   15534  *  packet buffer after the PHY detects link up.
   15535  */
   15536 static int
   15537 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15538 {
   15539 	uint16_t phyreg;
   15540 
   15541 	if (sc->sc_phytype != WMPHY_82578)
   15542 		return 0;
   15543 
    15544 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15545 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15546 	if ((phyreg & BMCR_LOOP) != 0)
   15547 		return 0;
   15548 
    15549 	/* Check if the link is up and at 1Gbps */
   15550 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15551 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15552 	    | BM_CS_STATUS_SPEED_MASK;
   15553 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15554 		| BM_CS_STATUS_SPEED_1000))
   15555 		return 0;
   15556 
   15557 	delay(200 * 1000);	/* XXX too big */
   15558 
    15559 	/* Flush the packets in the FIFO buffer */
   15560 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15561 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15562 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15563 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15564 
   15565 	return 0;
   15566 }
   15567 
   15568 static int
   15569 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15570 {
   15571 	int rv;
   15572 	uint16_t reg;
   15573 
   15574 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15575 	if (rv != 0)
   15576 		return rv;
   15577 
    15578 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15579 	    reg | HV_KMRN_MDIO_SLOW);
   15580 }
   15581 
   15582 /*
   15583  *  wm_configure_k1_ich8lan - Configure K1 power state
   15584  *  @sc: pointer to the HW structure
   15585  *  @enable: K1 state to configure
   15586  *
   15587  *  Configure the K1 power state based on the provided parameter.
   15588  *  Assumes semaphore already acquired.
   15589  */
   15590 static void
   15591 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15592 {
   15593 	uint32_t ctrl, ctrl_ext, tmp;
   15594 	uint16_t kmreg;
   15595 	int rv;
   15596 
   15597 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15598 
   15599 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15600 	if (rv != 0)
   15601 		return;
   15602 
   15603 	if (k1_enable)
   15604 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15605 	else
   15606 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15607 
   15608 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15609 	if (rv != 0)
   15610 		return;
   15611 
   15612 	delay(20);
   15613 
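          	/*
          	 * Briefly force the MAC speed (speed bits cleared, force-speed
          	 * and speed-bypass set), then restore the original CTRL and
          	 * CTRL_EXT values.  Intel's e1000 driver performs the same
          	 * dance, presumably so the new K1 setting takes effect.
          	 */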
   15614 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15615 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15616 
   15617 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15618 	tmp |= CTRL_FRCSPD;
   15619 
   15620 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15621 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15622 	CSR_WRITE_FLUSH(sc);
   15623 	delay(20);
   15624 
   15625 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15626 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15627 	CSR_WRITE_FLUSH(sc);
   15628 	delay(20);
   15629 
   15630 	return;
   15631 }
   15632 
   15633 /* special case - for 82575 - need to do manual init ... */
   15634 static void
   15635 wm_reset_init_script_82575(struct wm_softc *sc)
   15636 {
   15637 	/*
    15638 	 * Remark: this is untested code - we have no board without EEPROM.
    15639 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15640 	 */
   15641 
   15642 	/* SerDes configuration via SERDESCTRL */
   15643 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15644 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15647 
   15648 	/* CCM configuration via CCMCTL register */
   15649 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15651 
   15652 	/* PCIe lanes configuration */
   15653 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15654 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15655 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15657 
   15658 	/* PCIe PLL Configuration */
   15659 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15660 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15662 }
   15663 
   15664 static void
   15665 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15666 {
   15667 	uint32_t reg;
   15668 	uint16_t nvmword;
   15669 	int rv;
   15670 
   15671 	if (sc->sc_type != WM_T_82580)
   15672 		return;
   15673 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15674 		return;
   15675 
   15676 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15677 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15678 	if (rv != 0) {
   15679 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15680 		    __func__);
   15681 		return;
   15682 	}
   15683 
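          	/*
          	 * MDICNFG_DEST selects the external MDIO interface and
          	 * MDICNFG_COM_MDIO the shared (common) MDIO pins; mirror the
          	 * per-port CFG3 settings from the NVM into the register.
          	 */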
   15684 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15685 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15686 		reg |= MDICNFG_DEST;
   15687 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15688 		reg |= MDICNFG_COM_MDIO;
   15689 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15690 }
   15691 
   15692 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15693 
   15694 static bool
   15695 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15696 {
   15697 	uint32_t reg;
   15698 	uint16_t id1, id2;
   15699 	int i, rv;
   15700 
   15701 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15702 		device_xname(sc->sc_dev), __func__));
   15703 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15704 
   15705 	id1 = id2 = 0xffff;
   15706 	for (i = 0; i < 2; i++) {
   15707 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15708 		    &id1);
   15709 		if ((rv != 0) || MII_INVALIDID(id1))
   15710 			continue;
   15711 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15712 		    &id2);
   15713 		if ((rv != 0) || MII_INVALIDID(id2))
   15714 			continue;
   15715 		break;
   15716 	}
   15717 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15718 		goto out;
   15719 
   15720 	/*
   15721 	 * In case the PHY needs to be in mdio slow mode,
   15722 	 * set slow mode and try to get the PHY id again.
   15723 	 */
   15724 	rv = 0;
   15725 	if (sc->sc_type < WM_T_PCH_LPT) {
   15726 		sc->phy.release(sc);
   15727 		wm_set_mdio_slow_mode_hv(sc);
   15728 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15729 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15730 		sc->phy.acquire(sc);
   15731 	}
   15732 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    15733 		device_printf(sc->sc_dev, "%s: PHY is inaccessible\n", __func__);
   15734 		return false;
   15735 	}
   15736 out:
   15737 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15738 		/* Only unforce SMBus if ME is not active */
   15739 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15740 			uint16_t phyreg;
   15741 
   15742 			/* Unforce SMBus mode in PHY */
   15743 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15744 			    CV_SMB_CTRL, &phyreg);
   15745 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15746 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15747 			    CV_SMB_CTRL, phyreg);
   15748 
   15749 			/* Unforce SMBus mode in MAC */
   15750 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15751 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15752 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15753 		}
   15754 	}
   15755 	return true;
   15756 }
   15757 
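          /*
           *  wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
           *  @sc: pointer to the HW structure
           *
           *  Toggling the LANPHYPC pin value fully power-cycles the PHY.  It is
           *  used when the PHY registers cannot be accessed properly, e.g. after
           *  an Sx->S0 transition (see wm_resume_workarounds_pchlan() above).
           */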
   15758 static void
   15759 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15760 {
   15761 	uint32_t reg;
   15762 	int i;
   15763 
   15764 	/* Set PHY Config Counter to 50msec */
   15765 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15766 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15767 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15768 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15769 
   15770 	/* Toggle LANPHYPC */
   15771 	reg = CSR_READ(sc, WMREG_CTRL);
   15772 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15773 	reg &= ~CTRL_LANPHYPC_VALUE;
   15774 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15775 	CSR_WRITE_FLUSH(sc);
   15776 	delay(1000);
   15777 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15778 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15779 	CSR_WRITE_FLUSH(sc);
   15780 
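          	/*
          	 * Wait for the PHY to come back up: pre-LPT parts use a fixed
          	 * 50ms delay, while LPT and newer poll the LANPHYPC change
          	 * detect bit (CTRL_EXT_LPCD) for up to 100ms (20 x 5ms) and
          	 * then allow an extra 30ms to settle.
          	 */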
   15781 	if (sc->sc_type < WM_T_PCH_LPT)
   15782 		delay(50 * 1000);
   15783 	else {
   15784 		i = 20;
   15785 
   15786 		do {
   15787 			delay(5 * 1000);
   15788 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15789 		    && i--);
   15790 
   15791 		delay(30 * 1000);
   15792 	}
   15793 }
   15794 
   15795 static int
   15796 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15797 {
   15798 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15799 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15800 	uint32_t rxa;
   15801 	uint16_t scale = 0, lat_enc = 0;
   15802 	int32_t obff_hwm = 0;
   15803 	int64_t lat_ns, value;
   15804 
   15805 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15806 		device_xname(sc->sc_dev), __func__));
   15807 
   15808 	if (link) {
   15809 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15810 		uint32_t status;
   15811 		uint16_t speed;
   15812 		pcireg_t preg;
   15813 
   15814 		status = CSR_READ(sc, WMREG_STATUS);
   15815 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15816 		case STATUS_SPEED_10:
   15817 			speed = 10;
   15818 			break;
   15819 		case STATUS_SPEED_100:
   15820 			speed = 100;
   15821 			break;
   15822 		case STATUS_SPEED_1000:
   15823 			speed = 1000;
   15824 			break;
   15825 		default:
   15826 			device_printf(sc->sc_dev, "Unknown speed "
   15827 			    "(status = %08x)\n", status);
   15828 			return -1;
   15829 		}
   15830 
   15831 		/* Rx Packet Buffer Allocation size (KB) */
   15832 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15833 
   15834 		/*
   15835 		 * Determine the maximum latency tolerated by the device.
   15836 		 *
   15837 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15838 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15839 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15840 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15841 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15842 		 */
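          		/*
          		 * Illustrative example (assumed values, not read from
          		 * the hardware): with rxa = 24KB, a 1500 byte MTU and
          		 * a 1000Mbps link,
          		 *   lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000
          		 *	    / 1000 = 172384
          		 * and the encoding loop below yields scale = 2 (units
          		 * of 2^10 ns) and value = 169, i.e. about 173us.
          		 */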
   15843 		lat_ns = ((int64_t)rxa * 1024 -
   15844 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15845 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15846 		if (lat_ns < 0)
   15847 			lat_ns = 0;
   15848 		else
   15849 			lat_ns /= speed;
   15850 		value = lat_ns;
   15851 
   15852 		while (value > LTRV_VALUE) {
    15853 			scale++;
   15854 			value = howmany(value, __BIT(5));
   15855 		}
   15856 		if (scale > LTRV_SCALE_MAX) {
    15857 			device_printf(sc->sc_dev,
    15858 			    "Invalid LTR latency scale %d\n", scale);
   15859 			return -1;
   15860 		}
   15861 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15862 
   15863 		/* Determine the maximum latency tolerated by the platform */
   15864 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15865 		    WM_PCI_LTR_CAP_LPT);
   15866 		max_snoop = preg & 0xffff;
   15867 		max_nosnoop = preg >> 16;
   15868 
   15869 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15870 
   15871 		if (lat_enc > max_ltr_enc) {
   15872 			lat_enc = max_ltr_enc;
   15873 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15874 			    * PCI_LTR_SCALETONS(
   15875 				    __SHIFTOUT(lat_enc,
   15876 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15877 		}
   15878 
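          		/*
          		 * Convert the tolerated latency back into the amount
          		 * of Rx buffer (in KB) that fills during that time at
          		 * the current link speed; the OBFF high water mark is
          		 * whatever remains of the Rx allocation after that.
          		 */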
   15879 		if (lat_ns) {
   15880 			lat_ns *= speed * 1000;
   15881 			lat_ns /= 8;
   15882 			lat_ns /= 1000000000;
   15883 			obff_hwm = (int32_t)(rxa - lat_ns);
   15884 		}
   15885 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    15886 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   15887 			    "(rxa = %d, lat_ns = %d)\n",
   15888 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15889 			return -1;
   15890 		}
   15891 	}
   15892 	/* Snoop and No-Snoop latencies the same */
   15893 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15894 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15895 
   15896 	/* Set OBFF high water mark */
   15897 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15898 	reg |= obff_hwm;
   15899 	CSR_WRITE(sc, WMREG_SVT, reg);
   15900 
   15901 	/* Enable OBFF */
   15902 	reg = CSR_READ(sc, WMREG_SVCR);
   15903 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15904 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15905 
   15906 	return 0;
   15907 }
   15908 
   15909 /*
   15910  * I210 Errata 25 and I211 Errata 10
   15911  * Slow System Clock.
   15912  */
   15913 static int
   15914 wm_pll_workaround_i210(struct wm_softc *sc)
   15915 {
   15916 	uint32_t mdicnfg, wuc;
   15917 	uint32_t reg;
   15918 	pcireg_t pcireg;
   15919 	uint32_t pmreg;
   15920 	uint16_t nvmword, tmp_nvmword;
   15921 	uint16_t phyval;
   15922 	bool wa_done = false;
   15923 	int i, rv = 0;
   15924 
   15925 	/* Get Power Management cap offset */
   15926 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15927 	    &pmreg, NULL) == 0)
   15928 		return -1;
   15929 
   15930 	/* Save WUC and MDICNFG registers */
   15931 	wuc = CSR_READ(sc, WMREG_WUC);
   15932 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15933 
   15934 	reg = mdicnfg & ~MDICNFG_DEST;
   15935 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15936 
   15937 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15938 		nvmword = INVM_DEFAULT_AL;
   15939 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15940 
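          	/*
          	 * Check whether the PHY's PLL came up configured.  If not,
          	 * reset the PHY, rewrite the iNVM autoload word with
          	 * INVM_PLL_WO_VAL set, and bounce the device through
          	 * D3hot/D0 so the PLL relocks, retrying up to
          	 * WM_MAX_PLL_TRIES times.
          	 */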
   15941 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15942 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15943 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   15944 
   15945 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15946 			rv = 0;
   15947 			break; /* OK */
   15948 		} else
   15949 			rv = -1;
   15950 
   15951 		wa_done = true;
   15952 		/* Directly reset the internal PHY */
   15953 		reg = CSR_READ(sc, WMREG_CTRL);
   15954 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15955 
   15956 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15957 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15959 
   15960 		CSR_WRITE(sc, WMREG_WUC, 0);
   15961 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15962 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15963 
   15964 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15965 		    pmreg + PCI_PMCSR);
   15966 		pcireg |= PCI_PMCSR_STATE_D3;
   15967 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15968 		    pmreg + PCI_PMCSR, pcireg);
   15969 		delay(1000);
   15970 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15971 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15972 		    pmreg + PCI_PMCSR, pcireg);
   15973 
   15974 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15975 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15976 
   15977 		/* Restore WUC register */
   15978 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15979 	}
   15980 
   15981 	/* Restore MDICNFG setting */
   15982 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15983 	if (wa_done)
   15984 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15985 	return rv;
   15986 }
   15987 
   15988 static void
   15989 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15990 {
   15991 	uint32_t reg;
   15992 
   15993 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15994 		device_xname(sc->sc_dev), __func__));
   15995 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15996 	    || (sc->sc_type == WM_T_PCH_CNP));
   15997 
   15998 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15999 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16000 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16001 
   16002 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16003 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16004 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16005 }
   16006