      1 /*	$NetBSD: if_wm.c,v 1.645 2019/07/30 04:42:29 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.645 2019/07/30 04:42:29 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
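
         /*
          * DPRINTF() takes the printf() arguments as a single parenthesized
          * argument, so calls vanish entirely when WM_DEBUG is not defined.
          * A minimal usage sketch (hypothetical message):
          *
          *	DPRINTF(WM_DEBUG_LINK,
          *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
          */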
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * The maximum number of interrupts this driver uses.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
     202  * situations with jumbo frames. If an mbuf chain has more than 64 DMA
     203  * segments, m_defrag() is called to coalesce it.
    204  */
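
         /*
          * A minimal sketch of the m_defrag() fallback described above
          * (illustrative only; the driver's real retry logic and error
          * handling are elided):
          */
         #if 0
         	int error;

         	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
         	    BUS_DMA_NOWAIT);
         	if (error == EFBIG) {
         		/* Too many segments; coalesce the chain and retry. */
         		struct mbuf *m = m_defrag(m0, M_NOWAIT);
         		if (m != NULL)
         			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
         			    m, BUS_DMA_NOWAIT);
         	}
         #endif
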
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
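
         /*
          * Because WM_NTXDESC() and WM_TXQUEUELEN() are powers of two, the
          * ring indices above advance with a mask instead of a modulo, e.g.
          * with a 4096-entry ring, WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095
          * == 0, wrapping back to the start of the ring.
          */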
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
     232  * Receive descriptor list size.  We have one Rx buffer for each
     233  * normal-sized packet; a full-sized jumbo packet consumes 5 Rx
     234  * buffers.  We allocate 256 receive descriptors, each with a 2k
     235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256U
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
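
         /*
          * The mask also wraps backwards: WM_PREVRX(0) == (0 - 1) & 255 == 255
          * on two's-complement machines, stepping from slot 0 to the last
          * descriptor.
          */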
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
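         /* SW/FW semaphore bits for PHY access, indexed by MAC function id. */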
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
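         /*
          * Note: the string literal in the sizeof() below is not macro-expanded;
          * it only reserves room for the "<qname><2-digit queue number><evname>"
          * counter name that WM_Q_EVCNT_ATTACH() builds with snprintf().
          */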
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
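
         /*
          * For illustration, assuming a structure that declared
          * WM_Q_EVCNT_DEFINE(txq, txdw), an attach via
          * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, qnum, xname) expands to roughly:
          */
         #if 0
         	snprintf((q)->txq_txdw_evcnt_name,
         	    sizeof((q)->txq_txdw_evcnt_name),
         	    "%s%02d%s", "txq", (qnum), "txdw");	/* e.g. "txq00txdw" */
         	evcnt_attach_dynamic(&(q)->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
         	    (xname), (q)->txq_txdw_evcnt_name);
         #endif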
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
     335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
     360 	 * When ncpu > the number of Tx queues, a Tx queue is shared by multiple
     361 	 * CPUs. This intermediate queue hands packets between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
     366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
     417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 	krndsource_t rnd_source;	/* random source */
    462 };
    463 
    464 struct wm_phyop {
    465 	int (*acquire)(struct wm_softc *);
    466 	void (*release)(struct wm_softc *);
    467 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    468 	int (*writereg_locked)(device_t, int, int, uint16_t);
    469 	int reset_delay_us;
    470 };
    471 
    472 struct wm_nvmop {
    473 	int (*acquire)(struct wm_softc *);
    474 	void (*release)(struct wm_softc *);
    475 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    476 };
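
         /*
          * Chip-specific attach code fills in these tables so that common code
          * can take the right semaphore before touching the PHY or NVM. A
          * sketch of the usual access pattern (illustrative, not an exact
          * call site):
          */
         #if 0
         	if (sc->phy.acquire(sc) == 0) {
         		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
         		sc->phy.release(sc);
         	}
         #endif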
    477 
    478 /*
    479  * Software state per device.
    480  */
    481 struct wm_softc {
    482 	device_t sc_dev;		/* generic device information */
    483 	bus_space_tag_t sc_st;		/* bus space tag */
    484 	bus_space_handle_t sc_sh;	/* bus space handle */
    485 	bus_size_t sc_ss;		/* bus space size */
    486 	bus_space_tag_t sc_iot;		/* I/O space tag */
    487 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    488 	bus_size_t sc_ios;		/* I/O space size */
    489 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    490 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    491 	bus_size_t sc_flashs;		/* flash registers space size */
    492 	off_t sc_flashreg_offset;	/*
    493 					 * offset to flash registers from
    494 					 * start of BAR
    495 					 */
    496 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    497 
    498 	struct ethercom sc_ethercom;	/* ethernet common data */
    499 	struct mii_data sc_mii;		/* MII/media information */
    500 
    501 	pci_chipset_tag_t sc_pc;
    502 	pcitag_t sc_pcitag;
    503 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    504 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    505 
    506 	uint16_t sc_pcidevid;		/* PCI device ID */
    507 	wm_chip_type sc_type;		/* MAC type */
    508 	int sc_rev;			/* MAC revision */
    509 	wm_phy_type sc_phytype;		/* PHY type */
     510 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    511 #define	WM_MEDIATYPE_UNKNOWN		0x00
    512 #define	WM_MEDIATYPE_FIBER		0x01
    513 #define	WM_MEDIATYPE_COPPER		0x02
    514 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    515 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    516 	int sc_flags;			/* flags; see below */
    517 	int sc_if_flags;		/* last if_flags */
    518 	int sc_ec_capenable;		/* last ec_capenable */
    519 	int sc_flowflags;		/* 802.3x flow control flags */
    520 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    521 	int sc_align_tweak;
    522 
    523 	void *sc_ihs[WM_MAX_NINTR];	/*
    524 					 * interrupt cookie.
    525 					 * - legacy and msi use sc_ihs[0] only
    526 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    527 					 */
    528 	pci_intr_handle_t *sc_intrs;	/*
    529 					 * legacy and msi use sc_intrs[0] only
     530 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    531 					 */
    532 	int sc_nintrs;			/* number of interrupts */
    533 
    534 	int sc_link_intr_idx;		/* index of MSI-X tables */
    535 
    536 	callout_t sc_tick_ch;		/* tick callout */
    537 	bool sc_core_stopping;
    538 
    539 	int sc_nvm_ver_major;
    540 	int sc_nvm_ver_minor;
    541 	int sc_nvm_ver_build;
    542 	int sc_nvm_addrbits;		/* NVM address bits */
    543 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    544 	int sc_ich8_flash_base;
    545 	int sc_ich8_flash_bank_size;
    546 	int sc_nvm_k1_enabled;
    547 
    548 	int sc_nqueues;
    549 	struct wm_queue *sc_queue;
    550 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    551 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    552 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    553 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    554 
    555 	int sc_affinity_offset;
    556 
    557 #ifdef WM_EVENT_COUNTERS
    558 	/* Event counters. */
    559 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    560 
    561 	/* WM_T_82542_2_1 only */
    562 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    563 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    564 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    565 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    566 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    567 #endif /* WM_EVENT_COUNTERS */
    568 
     569 	/* This variable is used only on the 82547. */
    570 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    571 
    572 	uint32_t sc_ctrl;		/* prototype CTRL register */
    573 #if 0
    574 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    575 #endif
    576 	uint32_t sc_icr;		/* prototype interrupt bits */
    577 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    578 	uint32_t sc_tctl;		/* prototype TCTL register */
    579 	uint32_t sc_rctl;		/* prototype RCTL register */
    580 	uint32_t sc_txcw;		/* prototype TXCW register */
    581 	uint32_t sc_tipg;		/* prototype TIPG register */
    582 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    583 	uint32_t sc_pba;		/* prototype PBA register */
    584 
    585 	int sc_tbi_linkup;		/* TBI link status */
    586 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    587 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    588 
    589 	int sc_mchash_type;		/* multicast filter offset */
    590 
    591 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    592 
    593 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    594 	kmutex_t *sc_ich_phymtx;	/*
    595 					 * 82574/82583/ICH/PCH specific PHY
    596 					 * mutex. For 82574/82583, the mutex
    597 					 * is used for both PHY and NVM.
    598 					 */
    599 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    600 
    601 	struct wm_phyop phy;
    602 	struct wm_nvmop nvm;
    603 };
    604 
    605 #define WM_CORE_LOCK(_sc)						\
    606 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    607 #define WM_CORE_UNLOCK(_sc)						\
    608 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    609 #define WM_CORE_LOCKED(_sc)						\
    610 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
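
         /*
          * sc_core_lock may be NULL (non-MPSAFE configurations), hence the
          * NULL checks above. Typical pattern (illustrative):
          */
         #if 0
         	WM_CORE_LOCK(sc);
         	KASSERT(WM_CORE_LOCKED(sc));
         	/* ... modify shared softc state ... */
         	WM_CORE_UNLOCK(sc);
         #endif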
    611 
    612 #define	WM_RXCHAIN_RESET(rxq)						\
    613 do {									\
    614 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    615 	*(rxq)->rxq_tailp = NULL;					\
    616 	(rxq)->rxq_len = 0;						\
    617 } while (/*CONSTCOND*/0)
    618 
    619 #define	WM_RXCHAIN_LINK(rxq, m)						\
    620 do {									\
    621 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    622 	(rxq)->rxq_tailp = &(m)->m_next;				\
    623 } while (/*CONSTCOND*/0)
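
         /*
          * rxq_tailp always points at the m_next field of the last mbuf (or at
          * rxq_head when the chain is empty), so appending is O(1) without a
          * list walk. Illustrative sequence with hypothetical mbufs m1 and m2:
          */
         #if 0
         	WM_RXCHAIN_RESET(rxq);		/* rxq_head == NULL */
         	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
         	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
         #endif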
    624 
    625 #ifdef WM_EVENT_COUNTERS
    626 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    627 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    628 
    629 #define WM_Q_EVCNT_INCR(qname, evname)			\
    630 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    631 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    632 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    633 #else /* !WM_EVENT_COUNTERS */
    634 #define	WM_EVCNT_INCR(ev)	/* nothing */
    635 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    636 
    637 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    638 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    639 #endif /* !WM_EVENT_COUNTERS */
    640 
    641 #define	CSR_READ(sc, reg)						\
    642 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    643 #define	CSR_WRITE(sc, reg, val)						\
    644 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    645 #define	CSR_WRITE_FLUSH(sc)						\
    646 	(void)CSR_READ((sc), WMREG_STATUS)
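
         /*
          * PCI writes may be posted, so CSR_WRITE_FLUSH() reads STATUS to push
          * preceding writes out to the chip. Illustrative use:
          */
         #if 0
         	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
         	CSR_WRITE_FLUSH(sc);	/* ensure the write reached the device */
         	delay(10);
         #endif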
    647 
    648 #define ICH8_FLASH_READ32(sc, reg)					\
    649 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    650 	    (reg) + sc->sc_flashreg_offset)
    651 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    652 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset, (data))
    654 
    655 #define ICH8_FLASH_READ16(sc, reg)					\
    656 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    657 	    (reg) + sc->sc_flashreg_offset)
    658 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    659 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    660 	    (reg) + sc->sc_flashreg_offset, (data))
    661 
    662 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    663 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    664 
    665 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    666 #define	WM_CDTXADDR_HI(txq, x)						\
    667 	(sizeof(bus_addr_t) == 8 ?					\
    668 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    669 
    670 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    671 #define	WM_CDRXADDR_HI(rxq, x)						\
    672 	(sizeof(bus_addr_t) == 8 ?					\
    673 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
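
         /*
          * Descriptor base addresses are programmed as two 32-bit halves; the
          * _HI macros compile to 0 when bus_addr_t is 32 bits wide. Worked
          * example with a hypothetical 64-bit address:
          *
          *	0x123456789abc & 0xffffffffU	-> 0x56789abc (low half)
          *	(uint64_t)0x123456789abc >> 32	-> 0x1234     (high half)
          */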
    674 
    675 /*
     676  * Register read/write functions
     677  * other than CSR_{READ|WRITE}().
    678  */
    679 #if 0
    680 static inline uint32_t wm_io_read(struct wm_softc *, int);
    681 #endif
    682 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    683 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    684     uint32_t, uint32_t);
    685 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    686 
    687 /*
    688  * Descriptor sync/init functions.
    689  */
    690 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    691 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    692 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    693 
    694 /*
    695  * Device driver interface functions and commonly used functions.
    696  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    697  */
    698 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    699 static int	wm_match(device_t, cfdata_t, void *);
    700 static void	wm_attach(device_t, device_t, void *);
    701 static int	wm_detach(device_t, int);
    702 static bool	wm_suspend(device_t, const pmf_qual_t *);
    703 static bool	wm_resume(device_t, const pmf_qual_t *);
    704 static void	wm_watchdog(struct ifnet *);
    705 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    706     uint16_t *);
    707 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    708     uint16_t *);
    709 static void	wm_tick(void *);
    710 static int	wm_ifflags_cb(struct ethercom *);
    711 static int	wm_ioctl(struct ifnet *, u_long, void *);
    712 /* MAC address related */
    713 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    714 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    715 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    716 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    717 static int	wm_rar_count(struct wm_softc *);
    718 static void	wm_set_filter(struct wm_softc *);
    719 /* Reset and init related */
    720 static void	wm_set_vlan(struct wm_softc *);
    721 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    722 static void	wm_get_auto_rd_done(struct wm_softc *);
    723 static void	wm_lan_init_done(struct wm_softc *);
    724 static void	wm_get_cfg_done(struct wm_softc *);
    725 static int	wm_phy_post_reset(struct wm_softc *);
    726 static int	wm_write_smbus_addr(struct wm_softc *);
    727 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    728 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    729 static void	wm_initialize_hardware_bits(struct wm_softc *);
    730 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    731 static int	wm_reset_phy(struct wm_softc *);
    732 static void	wm_flush_desc_rings(struct wm_softc *);
    733 static void	wm_reset(struct wm_softc *);
    734 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    735 static void	wm_rxdrain(struct wm_rxqueue *);
    736 static void	wm_init_rss(struct wm_softc *);
    737 static void	wm_adjust_qnum(struct wm_softc *, int);
    738 static inline bool	wm_is_using_msix(struct wm_softc *);
    739 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    740 static int	wm_softint_establish(struct wm_softc *, int, int);
    741 static int	wm_setup_legacy(struct wm_softc *);
    742 static int	wm_setup_msix(struct wm_softc *);
    743 static int	wm_init(struct ifnet *);
    744 static int	wm_init_locked(struct ifnet *);
    745 static void	wm_unset_stopping_flags(struct wm_softc *);
    746 static void	wm_set_stopping_flags(struct wm_softc *);
    747 static void	wm_stop(struct ifnet *, int);
    748 static void	wm_stop_locked(struct ifnet *, int);
    749 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    750 static void	wm_82547_txfifo_stall(void *);
    751 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    752 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    753 /* DMA related */
    754 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    755 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    756 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    757 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    758     struct wm_txqueue *);
    759 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    760 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    761 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    762     struct wm_rxqueue *);
    763 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    764 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    765 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    766 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    767 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    768 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    769 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    770     struct wm_txqueue *);
    771 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    772     struct wm_rxqueue *);
    773 static int	wm_alloc_txrx_queues(struct wm_softc *);
    774 static void	wm_free_txrx_queues(struct wm_softc *);
    775 static int	wm_init_txrx_queues(struct wm_softc *);
    776 /* Start */
    777 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    778     struct wm_txsoft *, uint32_t *, uint8_t *);
    779 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    780 static void	wm_start(struct ifnet *);
    781 static void	wm_start_locked(struct ifnet *);
    782 static int	wm_transmit(struct ifnet *, struct mbuf *);
    783 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    784 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    785     bool);
    786 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    787     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    788 static void	wm_nq_start(struct ifnet *);
    789 static void	wm_nq_start_locked(struct ifnet *);
    790 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    791 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    792 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    793     bool);
    794 static void	wm_deferred_start_locked(struct wm_txqueue *);
    795 static void	wm_handle_queue(void *);
    796 /* Interrupt */
    797 static bool	wm_txeof(struct wm_txqueue *, u_int);
    798 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    799 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    800 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    801 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    802 static void	wm_linkintr(struct wm_softc *, uint32_t);
    803 static int	wm_intr_legacy(void *);
    804 static inline void	wm_txrxintr_disable(struct wm_queue *);
    805 static inline void	wm_txrxintr_enable(struct wm_queue *);
    806 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    807 static int	wm_txrxintr_msix(void *);
    808 static int	wm_linkintr_msix(void *);
    809 
    810 /*
    811  * Media related.
    812  * GMII, SGMII, TBI, SERDES and SFP.
    813  */
    814 /* Common */
    815 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    816 /* GMII related */
    817 static void	wm_gmii_reset(struct wm_softc *);
    818 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    819 static int	wm_get_phy_id_82575(struct wm_softc *);
    820 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    821 static int	wm_gmii_mediachange(struct ifnet *);
    822 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    823 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    824 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    825 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    826 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    827 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    828 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    829 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    830 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    831 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    832 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    833 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    834 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    835 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    836 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    837 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    838 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    839 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    840 	bool);
    841 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    842 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    843 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    844 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    845 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    846 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    847 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    848 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    849 static void	wm_gmii_statchg(struct ifnet *);
    850 /*
     851  * Kumeran related (80003, ICH* and PCH*).
     852  * These functions are not for accessing MII registers but for accessing
     853  * Kumeran-specific registers.
    854  */
    855 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    856 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    857 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    858 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    859 /* EMI register related */
    860 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    861 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    862 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    863 /* SGMII */
    864 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    865 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    866 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    867 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    868 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    869 /* TBI related */
    870 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    871 static void	wm_tbi_mediainit(struct wm_softc *);
    872 static int	wm_tbi_mediachange(struct ifnet *);
    873 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    874 static int	wm_check_for_link(struct wm_softc *);
    875 static void	wm_tbi_tick(struct wm_softc *);
    876 /* SERDES related */
    877 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    878 static int	wm_serdes_mediachange(struct ifnet *);
    879 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    880 static void	wm_serdes_tick(struct wm_softc *);
    881 /* SFP related */
    882 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    883 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    884 
    885 /*
    886  * NVM related.
    887  * Microwire, SPI (w/wo EERD) and Flash.
    888  */
    889 /* Misc functions */
    890 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    891 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    892 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    893 /* Microwire */
    894 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    895 /* SPI */
    896 static int	wm_nvm_ready_spi(struct wm_softc *);
    897 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     898 /* Used with EERD */
    899 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    900 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    901 /* Flash */
    902 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    903     unsigned int *);
    904 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    905 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    906 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    907     uint32_t *);
    908 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    909 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    910 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    911 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    912 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    913 /* iNVM */
    914 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    915 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    916 /* Lock, detecting NVM type, validate checksum and read */
    917 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    918 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    919 static int	wm_nvm_validate_checksum(struct wm_softc *);
    920 static void	wm_nvm_version_invm(struct wm_softc *);
    921 static void	wm_nvm_version(struct wm_softc *);
    922 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    923 
    924 /*
    925  * Hardware semaphores.
     926  * Very complex...
    927  */
    928 static int	wm_get_null(struct wm_softc *);
    929 static void	wm_put_null(struct wm_softc *);
    930 static int	wm_get_eecd(struct wm_softc *);
    931 static void	wm_put_eecd(struct wm_softc *);
    932 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    933 static void	wm_put_swsm_semaphore(struct wm_softc *);
    934 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    935 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    936 static int	wm_get_nvm_80003(struct wm_softc *);
    937 static void	wm_put_nvm_80003(struct wm_softc *);
    938 static int	wm_get_nvm_82571(struct wm_softc *);
    939 static void	wm_put_nvm_82571(struct wm_softc *);
    940 static int	wm_get_phy_82575(struct wm_softc *);
    941 static void	wm_put_phy_82575(struct wm_softc *);
    942 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    943 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    944 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    945 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    946 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    947 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    948 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    949 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    950 
    951 /*
    952  * Management mode and power management related subroutines.
    953  * BMC, AMT, suspend/resume and EEE.
    954  */
    955 #if 0
    956 static int	wm_check_mng_mode(struct wm_softc *);
    957 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    958 static int	wm_check_mng_mode_82574(struct wm_softc *);
    959 static int	wm_check_mng_mode_generic(struct wm_softc *);
    960 #endif
    961 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    962 static bool	wm_phy_resetisblocked(struct wm_softc *);
    963 static void	wm_get_hw_control(struct wm_softc *);
    964 static void	wm_release_hw_control(struct wm_softc *);
    965 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    966 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    967 static void	wm_init_manageability(struct wm_softc *);
    968 static void	wm_release_manageability(struct wm_softc *);
    969 static void	wm_get_wakeup(struct wm_softc *);
    970 static int	wm_ulp_disable(struct wm_softc *);
    971 static int	wm_enable_phy_wakeup(struct wm_softc *);
    972 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    973 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    974 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    975 static void	wm_enable_wakeup(struct wm_softc *);
    976 static void	wm_disable_aspm(struct wm_softc *);
    977 /* LPLU (Low Power Link Up) */
    978 static void	wm_lplu_d0_disable(struct wm_softc *);
    979 /* EEE */
    980 static int	wm_set_eee_i350(struct wm_softc *);
    981 static int	wm_set_eee_pchlan(struct wm_softc *);
    982 static int	wm_set_eee(struct wm_softc *);
    983 
    984 /*
    985  * Workarounds (mainly PHY related).
     986  * Normally, PHY workarounds live in the PHY drivers.
    987  */
    988 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    989 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    990 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    991 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    992 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    993 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    994 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    995 static int	wm_k1_workaround_lv(struct wm_softc *);
    996 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    997 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    998 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    999 static void	wm_reset_init_script_82575(struct wm_softc *);
   1000 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1001 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1002 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1003 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1004 static int	wm_pll_workaround_i210(struct wm_softc *);
   1005 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1006 
   1007 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1008     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1009 
   1010 /*
   1011  * Devices supported by this driver.
   1012  */
   1013 static const struct wm_product {
   1014 	pci_vendor_id_t		wmp_vendor;
   1015 	pci_product_id_t	wmp_product;
   1016 	const char		*wmp_name;
   1017 	wm_chip_type		wmp_type;
   1018 	uint32_t		wmp_flags;
   1019 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1020 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1021 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1022 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1023 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1024 } wm_products[] = {
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1026 	  "Intel i82542 1000BASE-X Ethernet",
   1027 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1030 	  "Intel i82543GC 1000BASE-X Ethernet",
   1031 	  WM_T_82543,		WMP_F_FIBER },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1034 	  "Intel i82543GC 1000BASE-T Ethernet",
   1035 	  WM_T_82543,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1038 	  "Intel i82544EI 1000BASE-T Ethernet",
   1039 	  WM_T_82544,		WMP_F_COPPER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1042 	  "Intel i82544EI 1000BASE-X Ethernet",
   1043 	  WM_T_82544,		WMP_F_FIBER },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1046 	  "Intel i82544GC 1000BASE-T Ethernet",
   1047 	  WM_T_82544,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1050 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1051 	  WM_T_82544,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1054 	  "Intel i82540EM 1000BASE-T Ethernet",
   1055 	  WM_T_82540,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1058 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1059 	  WM_T_82540,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1062 	  "Intel i82540EP 1000BASE-T Ethernet",
   1063 	  WM_T_82540,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1066 	  "Intel i82540EP 1000BASE-T Ethernet",
   1067 	  WM_T_82540,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1070 	  "Intel i82540EP 1000BASE-T Ethernet",
   1071 	  WM_T_82540,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1074 	  "Intel i82545EM 1000BASE-T Ethernet",
   1075 	  WM_T_82545,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1078 	  "Intel i82545GM 1000BASE-T Ethernet",
   1079 	  WM_T_82545_3,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1082 	  "Intel i82545GM 1000BASE-X Ethernet",
   1083 	  WM_T_82545_3,		WMP_F_FIBER },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1086 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1087 	  WM_T_82545_3,		WMP_F_SERDES },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1090 	  "Intel i82546EB 1000BASE-T Ethernet",
   1091 	  WM_T_82546,		WMP_F_COPPER },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1094 	  "Intel i82546EB 1000BASE-T Ethernet",
   1095 	  WM_T_82546,		WMP_F_COPPER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1098 	  "Intel i82545EM 1000BASE-X Ethernet",
   1099 	  WM_T_82545,		WMP_F_FIBER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1102 	  "Intel i82546EB 1000BASE-X Ethernet",
   1103 	  WM_T_82546,		WMP_F_FIBER },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1106 	  "Intel i82546GB 1000BASE-T Ethernet",
   1107 	  WM_T_82546_3,		WMP_F_COPPER },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1110 	  "Intel i82546GB 1000BASE-X Ethernet",
   1111 	  WM_T_82546_3,		WMP_F_FIBER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1114 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1115 	  WM_T_82546_3,		WMP_F_SERDES },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1118 	  "i82546GB quad-port Gigabit Ethernet",
   1119 	  WM_T_82546_3,		WMP_F_COPPER },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1122 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1123 	  WM_T_82546_3,		WMP_F_COPPER },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1126 	  "Intel PRO/1000MT (82546GB)",
   1127 	  WM_T_82546_3,		WMP_F_COPPER },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1130 	  "Intel i82541EI 1000BASE-T Ethernet",
   1131 	  WM_T_82541,		WMP_F_COPPER },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1134 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1135 	  WM_T_82541,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1138 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1139 	  WM_T_82541,		WMP_F_COPPER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1142 	  "Intel i82541ER 1000BASE-T Ethernet",
   1143 	  WM_T_82541_2,		WMP_F_COPPER },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1146 	  "Intel i82541GI 1000BASE-T Ethernet",
   1147 	  WM_T_82541_2,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1150 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1151 	  WM_T_82541_2,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1154 	  "Intel i82541PI 1000BASE-T Ethernet",
   1155 	  WM_T_82541_2,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1158 	  "Intel i82547EI 1000BASE-T Ethernet",
   1159 	  WM_T_82547,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1162 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1163 	  WM_T_82547,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1166 	  "Intel i82547GI 1000BASE-T Ethernet",
   1167 	  WM_T_82547_2,		WMP_F_COPPER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1170 	  "Intel PRO/1000 PT (82571EB)",
   1171 	  WM_T_82571,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1174 	  "Intel PRO/1000 PF (82571EB)",
   1175 	  WM_T_82571,		WMP_F_FIBER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1178 	  "Intel PRO/1000 PB (82571EB)",
   1179 	  WM_T_82571,		WMP_F_SERDES },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1182 	  "Intel PRO/1000 QT (82571EB)",
   1183 	  WM_T_82571,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1186 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1187 	  WM_T_82571,		WMP_F_COPPER, },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1190 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1191 	  WM_T_82571,		WMP_F_COPPER, },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1194 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1195 	  WM_T_82571,		WMP_F_SERDES, },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1198 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1199 	  WM_T_82571,		WMP_F_SERDES, },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1202 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1203 	  WM_T_82571,		WMP_F_FIBER, },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1206 	  "Intel i82572EI 1000baseT Ethernet",
   1207 	  WM_T_82572,		WMP_F_COPPER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1210 	  "Intel i82572EI 1000baseX Ethernet",
   1211 	  WM_T_82572,		WMP_F_FIBER },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1214 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1215 	  WM_T_82572,		WMP_F_SERDES },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1218 	  "Intel i82572EI 1000baseT Ethernet",
   1219 	  WM_T_82572,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1222 	  "Intel i82573E",
   1223 	  WM_T_82573,		WMP_F_COPPER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1226 	  "Intel i82573E IAMT",
   1227 	  WM_T_82573,		WMP_F_COPPER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1230 	  "Intel i82573L Gigabit Ethernet",
   1231 	  WM_T_82573,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1234 	  "Intel i82574L",
   1235 	  WM_T_82574,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1238 	  "Intel i82574L",
   1239 	  WM_T_82574,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1242 	  "Intel i82583V",
   1243 	  WM_T_82583,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1246 	  "i80003 dual 1000baseT Ethernet",
   1247 	  WM_T_80003,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1250 	  "i80003 dual 1000baseX Ethernet",
   1251 	  WM_T_80003,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1254 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1255 	  WM_T_80003,		WMP_F_SERDES },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1258 	  "Intel i80003 1000baseT Ethernet",
   1259 	  WM_T_80003,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1262 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1263 	  WM_T_80003,		WMP_F_SERDES },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1266 	  "Intel i82801H (M_AMT) LAN Controller",
   1267 	  WM_T_ICH8,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1269 	  "Intel i82801H (AMT) LAN Controller",
   1270 	  WM_T_ICH8,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1272 	  "Intel i82801H LAN Controller",
   1273 	  WM_T_ICH8,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1275 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1276 	  WM_T_ICH8,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1278 	  "Intel i82801H (M) LAN Controller",
   1279 	  WM_T_ICH8,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1281 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1282 	  WM_T_ICH8,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1284 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1285 	  WM_T_ICH8,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1287 	  "82567V-3 LAN Controller",
   1288 	  WM_T_ICH8,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1290 	  "82801I (AMT) LAN Controller",
   1291 	  WM_T_ICH9,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1293 	  "82801I 10/100 LAN Controller",
   1294 	  WM_T_ICH9,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1296 	  "82801I (G) 10/100 LAN Controller",
   1297 	  WM_T_ICH9,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1299 	  "82801I (GT) 10/100 LAN Controller",
   1300 	  WM_T_ICH9,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1302 	  "82801I (C) LAN Controller",
   1303 	  WM_T_ICH9,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1305 	  "82801I mobile LAN Controller",
   1306 	  WM_T_ICH9,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1308 	  "82801I mobile (V) LAN Controller",
   1309 	  WM_T_ICH9,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1311 	  "82801I mobile (AMT) LAN Controller",
   1312 	  WM_T_ICH9,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1314 	  "82567LM-4 LAN Controller",
   1315 	  WM_T_ICH9,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1317 	  "82567LM-2 LAN Controller",
   1318 	  WM_T_ICH10,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1320 	  "82567LF-2 LAN Controller",
   1321 	  WM_T_ICH10,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1323 	  "82567LM-3 LAN Controller",
   1324 	  WM_T_ICH10,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1326 	  "82567LF-3 LAN Controller",
   1327 	  WM_T_ICH10,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1329 	  "82567V-2 LAN Controller",
   1330 	  WM_T_ICH10,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1332 	  "82567V-3? LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1335 	  "HANKSVILLE LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1338 	  "PCH LAN (82577LM) Controller",
   1339 	  WM_T_PCH,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1341 	  "PCH LAN (82577LC) Controller",
   1342 	  WM_T_PCH,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1344 	  "PCH LAN (82578DM) Controller",
   1345 	  WM_T_PCH,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1347 	  "PCH LAN (82578DC) Controller",
   1348 	  WM_T_PCH,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1350 	  "PCH2 LAN (82579LM) Controller",
   1351 	  WM_T_PCH2,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1353 	  "PCH2 LAN (82579V) Controller",
   1354 	  WM_T_PCH2,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1356 	  "82575EB dual-1000baseT Ethernet",
   1357 	  WM_T_82575,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1359 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1360 	  WM_T_82575,		WMP_F_SERDES },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1362 	  "82575GB quad-1000baseT Ethernet",
   1363 	  WM_T_82575,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1365 	  "82575GB quad-1000baseT Ethernet (PM)",
   1366 	  WM_T_82575,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1368 	  "82576 1000BaseT Ethernet",
   1369 	  WM_T_82576,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1371 	  "82576 1000BaseX Ethernet",
   1372 	  WM_T_82576,		WMP_F_FIBER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1375 	  "82576 gigabit Ethernet (SERDES)",
   1376 	  WM_T_82576,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1379 	  "82576 quad-1000BaseT Ethernet",
   1380 	  WM_T_82576,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1383 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1387 	  "82576 gigabit Ethernet",
   1388 	  WM_T_82576,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1391 	  "82576 gigabit Ethernet (SERDES)",
   1392 	  WM_T_82576,		WMP_F_SERDES },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1394 	  "82576 quad-gigabit Ethernet (SERDES)",
   1395 	  WM_T_82576,		WMP_F_SERDES },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1398 	  "82580 1000BaseT Ethernet",
   1399 	  WM_T_82580,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1401 	  "82580 1000BaseX Ethernet",
   1402 	  WM_T_82580,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1405 	  "82580 1000BaseT Ethernet (SERDES)",
   1406 	  WM_T_82580,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1409 	  "82580 gigabit Ethernet (SGMII)",
   1410 	  WM_T_82580,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1412 	  "82580 dual-1000BaseT Ethernet",
   1413 	  WM_T_82580,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1416 	  "82580 quad-1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1420 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1421 	  WM_T_82580,		WMP_F_COPPER },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1424 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1425 	  WM_T_82580,		WMP_F_SERDES },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1428 	  "DH89XXCC 1000BASE-KX Ethernet",
   1429 	  WM_T_82580,		WMP_F_SERDES },
   1430 
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1432 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1433 	  WM_T_82580,		WMP_F_SERDES },
   1434 
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1436 	  "I350 Gigabit Network Connection",
   1437 	  WM_T_I350,		WMP_F_COPPER },
   1438 
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1440 	  "I350 Gigabit Fiber Network Connection",
   1441 	  WM_T_I350,		WMP_F_FIBER },
   1442 
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1444 	  "I350 Gigabit Backplane Connection",
   1445 	  WM_T_I350,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1448 	  "I350 Quad Port Gigabit Ethernet",
   1449 	  WM_T_I350,		WMP_F_SERDES },
   1450 
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1452 	  "I350 Gigabit Connection",
   1453 	  WM_T_I350,		WMP_F_COPPER },
   1454 
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1456 	  "I354 Gigabit Ethernet (KX)",
   1457 	  WM_T_I354,		WMP_F_SERDES },
   1458 
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1460 	  "I354 Gigabit Ethernet (SGMII)",
   1461 	  WM_T_I354,		WMP_F_COPPER },
   1462 
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1464 	  "I354 Gigabit Ethernet (2.5G)",
   1465 	  WM_T_I354,		WMP_F_COPPER },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1468 	  "I210-T1 Ethernet Server Adapter",
   1469 	  WM_T_I210,		WMP_F_COPPER },
   1470 
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1472 	  "I210 Ethernet (Copper OEM)",
   1473 	  WM_T_I210,		WMP_F_COPPER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1476 	  "I210 Ethernet (Copper IT)",
   1477 	  WM_T_I210,		WMP_F_COPPER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1480 	  "I210 Ethernet (Copper, FLASH less)",
   1481 	  WM_T_I210,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1484 	  "I210 Gigabit Ethernet (Fiber)",
   1485 	  WM_T_I210,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1488 	  "I210 Gigabit Ethernet (SERDES)",
   1489 	  WM_T_I210,		WMP_F_SERDES },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1492 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1493 	  WM_T_I210,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1496 	  "I210 Gigabit Ethernet (SGMII)",
   1497 	  WM_T_I210,		WMP_F_COPPER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1500 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1501 	  WM_T_I210,		WMP_F_COPPER },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1504 	  "I211 Ethernet (COPPER)",
   1505 	  WM_T_I211,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1507 	  "I217 V Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1510 	  "I217 LM Ethernet Connection",
   1511 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1513 	  "I218 V Ethernet Connection",
   1514 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1516 	  "I218 V Ethernet Connection",
   1517 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1519 	  "I218 V Ethernet Connection",
   1520 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1522 	  "I218 LM Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1525 	  "I218 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1528 	  "I218 LM Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1537 	  "I219 LM Ethernet Connection",
   1538 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1540 	  "I219 LM Ethernet Connection",
   1541 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1558 	  "I219 V Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1561 	  "I219 V Ethernet Connection",
   1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1564 	  "I219 V Ethernet Connection",
   1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1567 	  "I219 V Ethernet Connection",
   1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1570 	  "I219 V Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1573 	  "I219 V Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1576 	  "I219 V Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1579 	  "I219 V Ethernet Connection",
   1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1581 	{ 0,			0,
   1582 	  NULL,
   1583 	  0,			0 },
   1584 };
   1585 
   1586 /*
    1587  * Register read/write functions,
    1588  * other than CSR_{READ|WRITE}().
   1589  */
   1590 
   1591 #if 0 /* Not currently used */
   1592 static inline uint32_t
   1593 wm_io_read(struct wm_softc *sc, int reg)
   1594 {
   1595 
   1596 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1597 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1598 }
   1599 #endif
   1600 
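         /*
          * The I/O BAR is an indirect (address, data) register pair:
          * the register offset is written at offset 0 and the value is
          * then read or written at offset 4.
          */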
   1601 static inline void
   1602 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1603 {
   1604 
   1605 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1607 }
   1608 
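         /*
          * Write an 8-bit value into an 82575 indirect control register:
          * the data goes in the low bits and the target offset in the
          * address field, then we poll until the hardware reports ready.
          */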
   1609 static inline void
   1610 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1611     uint32_t data)
   1612 {
   1613 	uint32_t regval;
   1614 	int i;
   1615 
   1616 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1617 
   1618 	CSR_WRITE(sc, reg, regval);
   1619 
   1620 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1621 		delay(5);
   1622 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1623 			break;
   1624 	}
   1625 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1626 		aprint_error("%s: WARNING:"
   1627 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1628 		    device_xname(sc->sc_dev), reg);
   1629 	}
   1630 }
   1631 
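         /*
          * Store a DMA address in a descriptor address field as two
          * little-endian 32-bit halves; the high half is non-zero only
          * when bus addresses are 64 bits wide.
          */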
   1632 static inline void
   1633 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1634 {
   1635 	wa->wa_low = htole32(v & 0xffffffffU);
   1636 	if (sizeof(bus_addr_t) == 8)
   1637 		wa->wa_high = htole32((uint64_t) v >> 32);
   1638 	else
   1639 		wa->wa_high = 0;
   1640 }
   1641 
   1642 /*
   1643  * Descriptor sync/init functions.
   1644  */
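         /*
          * Sync "num" Tx descriptors starting at "start"; a range that
          * wraps past the end of the ring is synced in two pieces.
          */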
   1645 static inline void
   1646 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1647 {
   1648 	struct wm_softc *sc = txq->txq_sc;
   1649 
   1650 	/* If it will wrap around, sync to the end of the ring. */
   1651 	if ((start + num) > WM_NTXDESC(txq)) {
   1652 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1653 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1654 		    (WM_NTXDESC(txq) - start), ops);
   1655 		num -= (WM_NTXDESC(txq) - start);
   1656 		start = 0;
   1657 	}
   1658 
   1659 	/* Now sync whatever is left. */
   1660 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1661 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1662 }
   1663 
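         /* Sync a single Rx descriptor at index "start". */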
   1664 static inline void
   1665 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1666 {
   1667 	struct wm_softc *sc = rxq->rxq_sc;
   1668 
   1669 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1670 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1671 }
   1672 
   1673 static inline void
   1674 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1675 {
   1676 	struct wm_softc *sc = rxq->rxq_sc;
   1677 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1678 	struct mbuf *m = rxs->rxs_mbuf;
   1679 
   1680 	/*
   1681 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1682 	 * so that the payload after the Ethernet header is aligned
   1683 	 * to a 4-byte boundary.
   1684 
    1685 	 *
   1686 	 * The stupid chip uses the same size for every buffer, which
   1687 	 * is set in the Receive Control register.  We are using the 2K
   1688 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1689 	 * reason, we can't "scoot" packets longer than the standard
   1690 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1691 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1692 	 * the upper layer copy the headers.
   1693 	 */
   1694 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1695 
   1696 	if (sc->sc_type == WM_T_82574) {
   1697 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1698 		rxd->erx_data.erxd_addr =
   1699 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1700 		rxd->erx_data.erxd_dd = 0;
   1701 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1702 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1703 
   1704 		rxd->nqrx_data.nrxd_paddr =
   1705 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1706 		/* Currently, split header is not supported. */
   1707 		rxd->nqrx_data.nrxd_haddr = 0;
   1708 	} else {
   1709 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1710 
   1711 		wm_set_dma_addr(&rxd->wrx_addr,
   1712 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1713 		rxd->wrx_len = 0;
   1714 		rxd->wrx_cksum = 0;
   1715 		rxd->wrx_status = 0;
   1716 		rxd->wrx_errors = 0;
   1717 		rxd->wrx_special = 0;
   1718 	}
   1719 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1720 
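         	/*
         	 * Advancing the receive descriptor tail (RDT) hands the
         	 * descriptor at "start" back to the hardware.
         	 */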
   1721 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1722 }
   1723 
   1724 /*
   1725  * Device driver interface functions and commonly used functions.
   1726  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1727  */
   1728 
    1729 /* Look up a device in the supported device table */
   1730 static const struct wm_product *
   1731 wm_lookup(const struct pci_attach_args *pa)
   1732 {
   1733 	const struct wm_product *wmp;
   1734 
   1735 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1736 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1737 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1738 			return wmp;
   1739 	}
   1740 	return NULL;
   1741 }
   1742 
   1743 /* The match function (ca_match) */
   1744 static int
   1745 wm_match(device_t parent, cfdata_t cf, void *aux)
   1746 {
   1747 	struct pci_attach_args *pa = aux;
   1748 
   1749 	if (wm_lookup(pa) != NULL)
   1750 		return 1;
   1751 
   1752 	return 0;
   1753 }
   1754 
   1755 /* The attach function (ca_attach) */
   1756 static void
   1757 wm_attach(device_t parent, device_t self, void *aux)
   1758 {
   1759 	struct wm_softc *sc = device_private(self);
   1760 	struct pci_attach_args *pa = aux;
   1761 	prop_dictionary_t dict;
   1762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1763 	pci_chipset_tag_t pc = pa->pa_pc;
   1764 	int counts[PCI_INTR_TYPE_SIZE];
   1765 	pci_intr_type_t max_type;
   1766 	const char *eetype, *xname;
   1767 	bus_space_tag_t memt;
   1768 	bus_space_handle_t memh;
   1769 	bus_size_t memsize;
   1770 	int memh_valid;
   1771 	int i, error;
   1772 	const struct wm_product *wmp;
   1773 	prop_data_t ea;
   1774 	prop_number_t pn;
   1775 	uint8_t enaddr[ETHER_ADDR_LEN];
   1776 	char buf[256];
   1777 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1778 	pcireg_t preg, memtype;
   1779 	uint16_t eeprom_data, apme_mask;
   1780 	bool force_clear_smbi;
   1781 	uint32_t link_mode;
   1782 	uint32_t reg;
   1783 
   1784 	sc->sc_dev = self;
   1785 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1786 	sc->sc_core_stopping = false;
   1787 
   1788 	wmp = wm_lookup(pa);
   1789 #ifdef DIAGNOSTIC
   1790 	if (wmp == NULL) {
   1791 		printf("\n");
   1792 		panic("wm_attach: impossible");
   1793 	}
   1794 #endif
   1795 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1796 
   1797 	sc->sc_pc = pa->pa_pc;
   1798 	sc->sc_pcitag = pa->pa_tag;
   1799 
   1800 	if (pci_dma64_available(pa))
   1801 		sc->sc_dmat = pa->pa_dmat64;
   1802 	else
   1803 		sc->sc_dmat = pa->pa_dmat;
   1804 
   1805 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1806 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1807 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1808 
   1809 	sc->sc_type = wmp->wmp_type;
   1810 
   1811 	/* Set default function pointers */
   1812 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1813 	sc->phy.release = sc->nvm.release = wm_put_null;
   1814 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1815 
   1816 	if (sc->sc_type < WM_T_82543) {
   1817 		if (sc->sc_rev < 2) {
   1818 			aprint_error_dev(sc->sc_dev,
   1819 			    "i82542 must be at least rev. 2\n");
   1820 			return;
   1821 		}
   1822 		if (sc->sc_rev < 3)
   1823 			sc->sc_type = WM_T_82542_2_0;
   1824 	}
   1825 
   1826 	/*
   1827 	 * Disable MSI for Errata:
   1828 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1829 	 *
   1830 	 *  82544: Errata 25
   1831 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1832 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1833 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1834 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1835 	 *
   1836 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1837 	 *
   1838 	 *  82571 & 82572: Errata 63
   1839 	 */
   1840 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1841 	    || (sc->sc_type == WM_T_82572))
   1842 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1843 
   1844 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1845 	    || (sc->sc_type == WM_T_82580)
   1846 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1847 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1848 		sc->sc_flags |= WM_F_NEWQUEUE;
   1849 
   1850 	/* Set device properties (mactype) */
   1851 	dict = device_properties(sc->sc_dev);
   1852 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1853 
   1854 	/*
    1855 	 * Map the device.  All devices support memory-mapped access,
    1856 	 * and it is required for normal operation.
   1857 	 */
   1858 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1859 	switch (memtype) {
   1860 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1862 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1863 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1864 		break;
   1865 	default:
   1866 		memh_valid = 0;
   1867 		break;
   1868 	}
   1869 
   1870 	if (memh_valid) {
   1871 		sc->sc_st = memt;
   1872 		sc->sc_sh = memh;
   1873 		sc->sc_ss = memsize;
   1874 	} else {
   1875 		aprint_error_dev(sc->sc_dev,
   1876 		    "unable to map device registers\n");
   1877 		return;
   1878 	}
   1879 
   1880 	/*
   1881 	 * In addition, i82544 and later support I/O mapped indirect
   1882 	 * register access.  It is not desirable (nor supported in
   1883 	 * this driver) to use it for normal operation, though it is
   1884 	 * required to work around bugs in some chip versions.
   1885 	 */
   1886 	if (sc->sc_type >= WM_T_82544) {
   1887 		/* First we have to find the I/O BAR. */
   1888 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1889 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1890 			if (memtype == PCI_MAPREG_TYPE_IO)
   1891 				break;
   1892 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1893 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1894 				i += 4;	/* skip high bits, too */
   1895 		}
   1896 		if (i < PCI_MAPREG_END) {
   1897 			/*
    1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1900 			 * That is not a problem, because newer chips don't
    1901 			 * have this bug.
    1902 			 *
    1903 			 * The i8254x apparently doesn't respond when the
    1904 			 * I/O BAR is 0, which looks as if it has not
    1905 			 * been configured.
   1906 			 */
   1907 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1908 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "WARNING: I/O BAR at zero.\n");
   1911 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1912 					0, &sc->sc_iot, &sc->sc_ioh,
   1913 					NULL, &sc->sc_ios) == 0) {
   1914 				sc->sc_flags |= WM_F_IOH_VALID;
   1915 			} else
   1916 				aprint_error_dev(sc->sc_dev,
   1917 				    "WARNING: unable to map I/O space\n");
   1918 		}
   1919 
   1920 	}
   1921 
   1922 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1923 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1924 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1925 	if (sc->sc_type < WM_T_82542_2_1)
   1926 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1927 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1928 
   1929 	/* Power up chip */
   1930 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1931 	    && error != EOPNOTSUPP) {
   1932 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1933 		return;
   1934 	}
   1935 
   1936 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1937 	/*
    1938 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    1939 	 * resources.
   1940 	 */
   1941 	if (sc->sc_nqueues > 1) {
   1942 		max_type = PCI_INTR_TYPE_MSIX;
   1943 		/*
    1944 		 * The 82583 has an MSI-X capability in its PCI configuration
    1945 		 * space, but it doesn't actually support MSI-X. At least the
    1946 		 * documentation doesn't say anything about MSI-X.
   1947 		 */
   1948 		counts[PCI_INTR_TYPE_MSIX]
   1949 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1950 	} else {
   1951 		max_type = PCI_INTR_TYPE_MSI;
   1952 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1953 	}
   1954 
   1955 	/* Allocation settings */
   1956 	counts[PCI_INTR_TYPE_MSI] = 1;
   1957 	counts[PCI_INTR_TYPE_INTX] = 1;
   1958 	/* overridden by disable flags */
   1959 	if (wm_disable_msi != 0) {
   1960 		counts[PCI_INTR_TYPE_MSI] = 0;
   1961 		if (wm_disable_msix != 0) {
   1962 			max_type = PCI_INTR_TYPE_INTX;
   1963 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1964 		}
   1965 	} else if (wm_disable_msix != 0) {
   1966 		max_type = PCI_INTR_TYPE_MSI;
   1967 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1968 	}
   1969 
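         	/*
         	 * Interrupt types are tried in decreasing order of preference
         	 * (MSI-X, then MSI, then INTx); if setup of the allocated type
         	 * fails, it is released, max_type is lowered and the allocation
         	 * is retried below.
         	 */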
   1970 alloc_retry:
   1971 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1972 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1973 		return;
   1974 	}
   1975 
   1976 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1977 		error = wm_setup_msix(sc);
   1978 		if (error) {
   1979 			pci_intr_release(pc, sc->sc_intrs,
   1980 			    counts[PCI_INTR_TYPE_MSIX]);
   1981 
   1982 			/* Setup for MSI: Disable MSI-X */
   1983 			max_type = PCI_INTR_TYPE_MSI;
   1984 			counts[PCI_INTR_TYPE_MSI] = 1;
   1985 			counts[PCI_INTR_TYPE_INTX] = 1;
   1986 			goto alloc_retry;
   1987 		}
   1988 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1989 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1990 		error = wm_setup_legacy(sc);
   1991 		if (error) {
   1992 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1993 			    counts[PCI_INTR_TYPE_MSI]);
   1994 
   1995 			/* The next try is for INTx: Disable MSI */
   1996 			max_type = PCI_INTR_TYPE_INTX;
   1997 			counts[PCI_INTR_TYPE_INTX] = 1;
   1998 			goto alloc_retry;
   1999 		}
   2000 	} else {
   2001 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2002 		error = wm_setup_legacy(sc);
   2003 		if (error) {
   2004 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2005 			    counts[PCI_INTR_TYPE_INTX]);
   2006 			return;
   2007 		}
   2008 	}
   2009 
   2010 	/*
   2011 	 * Check the function ID (unit number of the chip).
   2012 	 */
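         	/*
         	 * On the multi-port controllers listed below, the STATUS
         	 * register reports which LAN function this instance is;
         	 * all other chips are treated as function 0.
         	 */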
   2013 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2014 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2015 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2016 	    || (sc->sc_type == WM_T_82580)
   2017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2018 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2019 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2020 	else
   2021 		sc->sc_funcid = 0;
   2022 
   2023 	/*
   2024 	 * Determine a few things about the bus we're connected to.
   2025 	 */
   2026 	if (sc->sc_type < WM_T_82543) {
   2027 		/* We don't really know the bus characteristics here. */
   2028 		sc->sc_bus_speed = 33;
   2029 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2030 		/*
    2031 		 * CSA (Communication Streaming Architecture) is about as fast
    2032 		 * as a 32-bit 66MHz PCI bus.
   2033 		 */
   2034 		sc->sc_flags |= WM_F_CSA;
   2035 		sc->sc_bus_speed = 66;
   2036 		aprint_verbose_dev(sc->sc_dev,
   2037 		    "Communication Streaming Architecture\n");
   2038 		if (sc->sc_type == WM_T_82547) {
   2039 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2040 			callout_setfunc(&sc->sc_txfifo_ch,
   2041 			    wm_82547_txfifo_stall, sc);
   2042 			aprint_verbose_dev(sc->sc_dev,
   2043 			    "using 82547 Tx FIFO stall work-around\n");
   2044 		}
   2045 	} else if (sc->sc_type >= WM_T_82571) {
   2046 		sc->sc_flags |= WM_F_PCIE;
   2047 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2048 		    && (sc->sc_type != WM_T_ICH10)
   2049 		    && (sc->sc_type != WM_T_PCH)
   2050 		    && (sc->sc_type != WM_T_PCH2)
   2051 		    && (sc->sc_type != WM_T_PCH_LPT)
   2052 		    && (sc->sc_type != WM_T_PCH_SPT)
   2053 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2054 			/* ICH* and PCH* have no PCIe capability registers */
   2055 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2056 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2057 				NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIe capability\n");
   2060 		}
   2061 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2062 	} else {
   2063 		reg = CSR_READ(sc, WMREG_STATUS);
   2064 		if (reg & STATUS_BUS64)
   2065 			sc->sc_flags |= WM_F_BUS64;
   2066 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2067 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2068 
   2069 			sc->sc_flags |= WM_F_PCIX;
   2070 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2071 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2072 				aprint_error_dev(sc->sc_dev,
   2073 				    "unable to find PCIX capability\n");
   2074 			else if (sc->sc_type != WM_T_82545_3 &&
   2075 				 sc->sc_type != WM_T_82546_3) {
   2076 				/*
   2077 				 * Work around a problem caused by the BIOS
   2078 				 * setting the max memory read byte count
   2079 				 * incorrectly.
   2080 				 */
   2081 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2082 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2083 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2084 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2085 
   2086 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2087 				    PCIX_CMD_BYTECNT_SHIFT;
   2088 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2089 				    PCIX_STATUS_MAXB_SHIFT;
   2090 				if (bytecnt > maxb) {
   2091 					aprint_verbose_dev(sc->sc_dev,
   2092 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2093 					    512 << bytecnt, 512 << maxb);
   2094 					pcix_cmd = (pcix_cmd &
   2095 					    ~PCIX_CMD_BYTECNT_MASK) |
   2096 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2097 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2098 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2099 					    pcix_cmd);
   2100 				}
   2101 			}
   2102 		}
   2103 		/*
   2104 		 * The quad port adapter is special; it has a PCIX-PCIX
   2105 		 * bridge on the board, and can run the secondary bus at
   2106 		 * a higher speed.
   2107 		 */
   2108 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2109 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2110 								      : 66;
   2111 		} else if (sc->sc_flags & WM_F_PCIX) {
   2112 			switch (reg & STATUS_PCIXSPD_MASK) {
   2113 			case STATUS_PCIXSPD_50_66:
   2114 				sc->sc_bus_speed = 66;
   2115 				break;
   2116 			case STATUS_PCIXSPD_66_100:
   2117 				sc->sc_bus_speed = 100;
   2118 				break;
   2119 			case STATUS_PCIXSPD_100_133:
   2120 				sc->sc_bus_speed = 133;
   2121 				break;
   2122 			default:
   2123 				aprint_error_dev(sc->sc_dev,
   2124 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2125 				    reg & STATUS_PCIXSPD_MASK);
   2126 				sc->sc_bus_speed = 66;
   2127 				break;
   2128 			}
   2129 		} else
   2130 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2131 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2132 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2133 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2134 	}
   2135 
   2136 	/* clear interesting stat counters */
   2137 	CSR_READ(sc, WMREG_COLC);
   2138 	CSR_READ(sc, WMREG_RXERRC);
   2139 
   2140 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2141 	    || (sc->sc_type >= WM_T_ICH8))
   2142 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2143 	if (sc->sc_type >= WM_T_ICH8)
   2144 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2145 
    2146 	/* Set up PHY/NVM access functions and semaphores */
   2147 	switch (sc->sc_type) {
   2148 	case WM_T_82542_2_0:
   2149 	case WM_T_82542_2_1:
   2150 	case WM_T_82543:
   2151 	case WM_T_82544:
   2152 		/* Microwire */
   2153 		sc->nvm.read = wm_nvm_read_uwire;
   2154 		sc->sc_nvm_wordsize = 64;
   2155 		sc->sc_nvm_addrbits = 6;
   2156 		break;
   2157 	case WM_T_82540:
   2158 	case WM_T_82545:
   2159 	case WM_T_82545_3:
   2160 	case WM_T_82546:
   2161 	case WM_T_82546_3:
   2162 		/* Microwire */
   2163 		sc->nvm.read = wm_nvm_read_uwire;
   2164 		reg = CSR_READ(sc, WMREG_EECD);
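         		/*
         		 * The EECD size strap selects either a 256-word
         		 * (8 address bits) or a 64-word (6 address bits)
         		 * Microwire EEPROM.
         		 */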
   2165 		if (reg & EECD_EE_SIZE) {
   2166 			sc->sc_nvm_wordsize = 256;
   2167 			sc->sc_nvm_addrbits = 8;
   2168 		} else {
   2169 			sc->sc_nvm_wordsize = 64;
   2170 			sc->sc_nvm_addrbits = 6;
   2171 		}
   2172 		sc->sc_flags |= WM_F_LOCK_EECD;
   2173 		sc->nvm.acquire = wm_get_eecd;
   2174 		sc->nvm.release = wm_put_eecd;
   2175 		break;
   2176 	case WM_T_82541:
   2177 	case WM_T_82541_2:
   2178 	case WM_T_82547:
   2179 	case WM_T_82547_2:
   2180 		reg = CSR_READ(sc, WMREG_EECD);
   2181 		/*
    2182 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2183 		 * the 8254[17], so set the flags and functions before calling it.
   2184 		 */
   2185 		sc->sc_flags |= WM_F_LOCK_EECD;
   2186 		sc->nvm.acquire = wm_get_eecd;
   2187 		sc->nvm.release = wm_put_eecd;
   2188 		if (reg & EECD_EE_TYPE) {
   2189 			/* SPI */
   2190 			sc->nvm.read = wm_nvm_read_spi;
   2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2192 			wm_nvm_set_addrbits_size_eecd(sc);
   2193 		} else {
   2194 			/* Microwire */
   2195 			sc->nvm.read = wm_nvm_read_uwire;
   2196 			if ((reg & EECD_EE_ABITS) != 0) {
   2197 				sc->sc_nvm_wordsize = 256;
   2198 				sc->sc_nvm_addrbits = 8;
   2199 			} else {
   2200 				sc->sc_nvm_wordsize = 64;
   2201 				sc->sc_nvm_addrbits = 6;
   2202 			}
   2203 		}
   2204 		break;
   2205 	case WM_T_82571:
   2206 	case WM_T_82572:
   2207 		/* SPI */
   2208 		sc->nvm.read = wm_nvm_read_eerd;
    2209 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2210 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2211 		wm_nvm_set_addrbits_size_eecd(sc);
   2212 		sc->phy.acquire = wm_get_swsm_semaphore;
   2213 		sc->phy.release = wm_put_swsm_semaphore;
   2214 		sc->nvm.acquire = wm_get_nvm_82571;
   2215 		sc->nvm.release = wm_put_nvm_82571;
   2216 		break;
   2217 	case WM_T_82573:
   2218 	case WM_T_82574:
   2219 	case WM_T_82583:
   2220 		sc->nvm.read = wm_nvm_read_eerd;
    2221 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2222 		if (sc->sc_type == WM_T_82573) {
   2223 			sc->phy.acquire = wm_get_swsm_semaphore;
   2224 			sc->phy.release = wm_put_swsm_semaphore;
   2225 			sc->nvm.acquire = wm_get_nvm_82571;
   2226 			sc->nvm.release = wm_put_nvm_82571;
   2227 		} else {
   2228 			/* Both PHY and NVM use the same semaphore. */
   2229 			sc->phy.acquire = sc->nvm.acquire
   2230 			    = wm_get_swfwhw_semaphore;
   2231 			sc->phy.release = sc->nvm.release
   2232 			    = wm_put_swfwhw_semaphore;
   2233 		}
   2234 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2235 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2236 			sc->sc_nvm_wordsize = 2048;
   2237 		} else {
   2238 			/* SPI */
   2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2240 			wm_nvm_set_addrbits_size_eecd(sc);
   2241 		}
   2242 		break;
   2243 	case WM_T_82575:
   2244 	case WM_T_82576:
   2245 	case WM_T_82580:
   2246 	case WM_T_I350:
   2247 	case WM_T_I354:
   2248 	case WM_T_80003:
   2249 		/* SPI */
   2250 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 		wm_nvm_set_addrbits_size_eecd(sc);
   2252 		if ((sc->sc_type == WM_T_80003)
   2253 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2254 			sc->nvm.read = wm_nvm_read_eerd;
   2255 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2256 		} else {
   2257 			sc->nvm.read = wm_nvm_read_spi;
   2258 			sc->sc_flags |= WM_F_LOCK_EECD;
   2259 		}
   2260 		sc->phy.acquire = wm_get_phy_82575;
   2261 		sc->phy.release = wm_put_phy_82575;
   2262 		sc->nvm.acquire = wm_get_nvm_80003;
   2263 		sc->nvm.release = wm_put_nvm_80003;
   2264 		break;
   2265 	case WM_T_ICH8:
   2266 	case WM_T_ICH9:
   2267 	case WM_T_ICH10:
   2268 	case WM_T_PCH:
   2269 	case WM_T_PCH2:
   2270 	case WM_T_PCH_LPT:
   2271 		sc->nvm.read = wm_nvm_read_ich8;
   2272 		/* FLASH */
   2273 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2274 		sc->sc_nvm_wordsize = 2048;
   2275 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2276 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2277 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2278 			aprint_error_dev(sc->sc_dev,
   2279 			    "can't map FLASH registers\n");
   2280 			goto out;
   2281 		}
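         		/*
         		 * GFPREG encodes the first and last flash sectors of
         		 * the NVM region; from these, compute the flash base
         		 * address in bytes and the size of one bank in 16-bit
         		 * words (the region is assumed to hold two banks).
         		 */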
   2282 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2283 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2284 		    ICH_FLASH_SECTOR_SIZE;
   2285 		sc->sc_ich8_flash_bank_size =
   2286 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2287 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2288 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2289 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2290 		sc->sc_flashreg_offset = 0;
   2291 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2292 		sc->phy.release = wm_put_swflag_ich8lan;
   2293 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2294 		sc->nvm.release = wm_put_nvm_ich8lan;
   2295 		break;
   2296 	case WM_T_PCH_SPT:
   2297 	case WM_T_PCH_CNP:
   2298 		sc->nvm.read = wm_nvm_read_spt;
   2299 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2300 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2301 		sc->sc_flasht = sc->sc_st;
   2302 		sc->sc_flashh = sc->sc_sh;
   2303 		sc->sc_ich8_flash_base = 0;
   2304 		sc->sc_nvm_wordsize =
   2305 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2306 		    * NVM_SIZE_MULTIPLIER;
    2307 		/* That's the size in bytes; we want it in words */
   2308 		sc->sc_nvm_wordsize /= 2;
   2309 		/* Assume 2 banks */
   2310 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2311 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2312 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2313 		sc->phy.release = wm_put_swflag_ich8lan;
   2314 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2315 		sc->nvm.release = wm_put_nvm_ich8lan;
   2316 		break;
   2317 	case WM_T_I210:
   2318 	case WM_T_I211:
    2319 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2320 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2321 		if (wm_nvm_flash_presence_i210(sc)) {
   2322 			sc->nvm.read = wm_nvm_read_eerd;
   2323 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2324 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2325 			wm_nvm_set_addrbits_size_eecd(sc);
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_invm;
   2328 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2329 			sc->sc_nvm_wordsize = INVM_SIZE;
   2330 		}
   2331 		sc->phy.acquire = wm_get_phy_82575;
   2332 		sc->phy.release = wm_put_phy_82575;
   2333 		sc->nvm.acquire = wm_get_nvm_80003;
   2334 		sc->nvm.release = wm_put_nvm_80003;
   2335 		break;
   2336 	default:
   2337 		break;
   2338 	}
   2339 
   2340 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82571:
   2343 	case WM_T_82572:
   2344 		reg = CSR_READ(sc, WMREG_SWSM2);
   2345 		if ((reg & SWSM2_LOCK) == 0) {
   2346 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2347 			force_clear_smbi = true;
   2348 		} else
   2349 			force_clear_smbi = false;
   2350 		break;
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 		force_clear_smbi = true;
   2355 		break;
   2356 	default:
   2357 		force_clear_smbi = false;
   2358 		break;
   2359 	}
   2360 	if (force_clear_smbi) {
   2361 		reg = CSR_READ(sc, WMREG_SWSM);
   2362 		if ((reg & SWSM_SMBI) != 0)
   2363 			aprint_error_dev(sc->sc_dev,
   2364 			    "Please update the Bootagent\n");
   2365 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2366 	}
   2367 
   2368 	/*
    2369 	 * Defer printing the EEPROM type until after verifying the checksum.
    2370 	 * This allows the EEPROM type to be printed correctly in the case
    2371 	 * that no EEPROM is attached.
   2372 	 */
   2373 	/*
   2374 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2375 	 * this for later, so we can fail future reads from the EEPROM.
   2376 	 */
   2377 	if (wm_nvm_validate_checksum(sc)) {
   2378 		/*
    2379 		 * Retry the read, because some PCI-e parts fail the
    2380 		 * first check due to the link being in a sleep state.
   2381 		 */
   2382 		if (wm_nvm_validate_checksum(sc))
   2383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2384 	}
   2385 
   2386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2388 	else {
   2389 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2390 		    sc->sc_nvm_wordsize);
   2391 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2392 			aprint_verbose("iNVM");
   2393 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2394 			aprint_verbose("FLASH(HW)");
   2395 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2396 			aprint_verbose("FLASH");
   2397 		else {
   2398 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2399 				eetype = "SPI";
   2400 			else
   2401 				eetype = "MicroWire";
   2402 			aprint_verbose("(%d address bits) %s EEPROM",
   2403 			    sc->sc_nvm_addrbits, eetype);
   2404 		}
   2405 	}
   2406 	wm_nvm_version(sc);
   2407 	aprint_verbose("\n");
   2408 
   2409 	/*
   2410 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2411 	 * incorrect.
   2412 	 */
   2413 	wm_gmii_setup_phytype(sc, 0, 0);
   2414 
   2415 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2416 	switch (sc->sc_type) {
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 	case WM_T_PCH_CNP:
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		if ((eeprom_data & apme_mask) != 0)
   2428 			sc->sc_flags |= WM_F_WOL;
   2429 		break;
   2430 	default:
   2431 		break;
   2432 	}
   2433 
   2434 	/* Reset the chip to a known state. */
   2435 	wm_reset(sc);
   2436 
   2437 	/*
   2438 	 * Check for I21[01] PLL workaround.
   2439 	 *
   2440 	 * Three cases:
   2441 	 * a) Chip is I211.
   2442 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2443 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2444 	 */
   2445 	if (sc->sc_type == WM_T_I211)
   2446 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2447 	if (sc->sc_type == WM_T_I210) {
   2448 		if (!wm_nvm_flash_presence_i210(sc))
   2449 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2450 		else if ((sc->sc_nvm_ver_major < 3)
   2451 		    || ((sc->sc_nvm_ver_major == 3)
   2452 			&& (sc->sc_nvm_ver_minor < 25))) {
   2453 			aprint_verbose_dev(sc->sc_dev,
   2454 			    "ROM image version %d.%d is older than 3.25\n",
   2455 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2456 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2457 		}
   2458 	}
   2459 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2460 		wm_pll_workaround_i210(sc);
   2461 
   2462 	wm_get_wakeup(sc);
   2463 
   2464 	/* Non-AMT based hardware can now take control from firmware */
   2465 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2466 		wm_get_hw_control(sc);
   2467 
   2468 	/*
    2469 	 * Read the Ethernet address from the EEPROM, unless it was
    2470 	 * already found in the device properties.
   2471 	 */
   2472 	ea = prop_dictionary_get(dict, "mac-address");
   2473 	if (ea != NULL) {
   2474 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2475 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2476 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2477 	} else {
   2478 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2479 			aprint_error_dev(sc->sc_dev,
   2480 			    "unable to read Ethernet address\n");
   2481 			goto out;
   2482 		}
   2483 	}
   2484 
   2485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2486 	    ether_sprintf(enaddr));
   2487 
   2488 	/*
   2489 	 * Read the config info from the EEPROM, and set up various
   2490 	 * bits in the control registers based on their contents.
   2491 	 */
   2492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2493 	if (pn != NULL) {
   2494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2496 	} else {
   2497 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2499 			goto out;
   2500 		}
   2501 	}
   2502 
   2503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2504 	if (pn != NULL) {
   2505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2507 	} else {
   2508 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2510 			goto out;
   2511 		}
   2512 	}
   2513 
   2514 	/* check for WM_F_WOL */
   2515 	switch (sc->sc_type) {
   2516 	case WM_T_82542_2_0:
   2517 	case WM_T_82542_2_1:
   2518 	case WM_T_82543:
   2519 		/* dummy? */
   2520 		eeprom_data = 0;
   2521 		apme_mask = NVM_CFG3_APME;
   2522 		break;
   2523 	case WM_T_82544:
   2524 		apme_mask = NVM_CFG2_82544_APM_EN;
   2525 		eeprom_data = cfg2;
   2526 		break;
   2527 	case WM_T_82546:
   2528 	case WM_T_82546_3:
   2529 	case WM_T_82571:
   2530 	case WM_T_82572:
   2531 	case WM_T_82573:
   2532 	case WM_T_82574:
   2533 	case WM_T_82583:
   2534 	case WM_T_80003:
   2535 	case WM_T_82575:
   2536 	case WM_T_82576:
   2537 		apme_mask = NVM_CFG3_APME;
   2538 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2539 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2540 		break;
   2541 	case WM_T_82580:
   2542 	case WM_T_I350:
   2543 	case WM_T_I354:
   2544 	case WM_T_I210:
   2545 	case WM_T_I211:
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc,
   2548 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2549 		    1, &eeprom_data);
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 	case WM_T_PCH_SPT:
   2558 	case WM_T_PCH_CNP:
    2559 		/* Already checked before wm_reset() */
   2560 		apme_mask = eeprom_data = 0;
   2561 		break;
   2562 	default: /* XXX 82540 */
   2563 		apme_mask = NVM_CFG3_APME;
   2564 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2565 		break;
   2566 	}
    2567 	/* Check for the WM_F_WOL flag now that the EEPROM settings are read */
   2568 	if ((eeprom_data & apme_mask) != 0)
   2569 		sc->sc_flags |= WM_F_WOL;
   2570 
   2571 	/*
    2572 	 * We have the EEPROM settings; now apply the special cases
    2573 	 * where the EEPROM may be wrong or the board won't support
    2574 	 * wake-on-LAN on a particular port.
   2575 	 */
   2576 	switch (sc->sc_pcidevid) {
   2577 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2578 		sc->sc_flags &= ~WM_F_WOL;
   2579 		break;
   2580 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2581 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2582 		/* Wake events only supported on port A for dual fiber
   2583 		 * regardless of eeprom setting */
   2584 		if (sc->sc_funcid == 1)
   2585 			sc->sc_flags &= ~WM_F_WOL;
   2586 		break;
   2587 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2588 		/* If quad port adapter, disable WoL on all but port A */
   2589 		if (sc->sc_funcid != 0)
   2590 			sc->sc_flags &= ~WM_F_WOL;
   2591 		break;
   2592 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2593 		/* Wake events only supported on port A for dual fiber
   2594 		 * regardless of eeprom setting */
   2595 		if (sc->sc_funcid == 1)
   2596 			sc->sc_flags &= ~WM_F_WOL;
   2597 		break;
   2598 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2600 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2601 		/* If quad port adapter, disable WoL on all but port A */
   2602 		if (sc->sc_funcid != 0)
   2603 			sc->sc_flags &= ~WM_F_WOL;
   2604 		break;
   2605 	}
   2606 
   2607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2608 		/* Check NVM for autonegotiation */
   2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2610 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2611 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2612 		}
   2613 	}
   2614 
   2615 	/*
    2616 	 * XXX need special handling for some multiple-port cards
    2617 	 * to disable a particular port.
   2618 	 */
   2619 
   2620 	if (sc->sc_type >= WM_T_82544) {
   2621 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2622 		if (pn != NULL) {
   2623 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2624 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2625 		} else {
   2626 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2627 				aprint_error_dev(sc->sc_dev,
   2628 				    "unable to read SWDPIN\n");
   2629 				goto out;
   2630 			}
   2631 		}
   2632 	}
   2633 
   2634 	if (cfg1 & NVM_CFG1_ILOS)
   2635 		sc->sc_ctrl |= CTRL_ILOS;
   2636 
   2637 	/*
   2638 	 * XXX
    2639 	 * This code isn't correct because pins 2 and 3 are located
    2640 	 * at different positions on newer chips. Check all datasheets.
    2641 	 *
    2642 	 * Until this problem is resolved, only do this for chips <= 82580.
   2643 	 */
   2644 	if (sc->sc_type <= WM_T_82580) {
   2645 		if (sc->sc_type >= WM_T_82544) {
   2646 			sc->sc_ctrl |=
   2647 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2648 			    CTRL_SWDPIO_SHIFT;
   2649 			sc->sc_ctrl |=
   2650 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2651 			    CTRL_SWDPINS_SHIFT;
   2652 		} else {
   2653 			sc->sc_ctrl |=
   2654 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2655 			    CTRL_SWDPIO_SHIFT;
   2656 		}
   2657 	}
   2658 
   2659 	/* XXX For other than 82580? */
   2660 	if (sc->sc_type == WM_T_82580) {
   2661 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2662 		if (nvmword & __BIT(13))
   2663 			sc->sc_ctrl |= CTRL_ILOS;
   2664 	}
   2665 
   2666 #if 0
   2667 	if (sc->sc_type >= WM_T_82544) {
   2668 		if (cfg1 & NVM_CFG1_IPS0)
   2669 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2670 		if (cfg1 & NVM_CFG1_IPS1)
   2671 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2672 		sc->sc_ctrl_ext |=
   2673 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2674 		    CTRL_EXT_SWDPIO_SHIFT;
   2675 		sc->sc_ctrl_ext |=
   2676 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2677 		    CTRL_EXT_SWDPINS_SHIFT;
   2678 	} else {
   2679 		sc->sc_ctrl_ext |=
   2680 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2681 		    CTRL_EXT_SWDPIO_SHIFT;
   2682 	}
   2683 #endif
   2684 
   2685 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2686 #if 0
   2687 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2688 #endif
   2689 
   2690 	if (sc->sc_type == WM_T_PCH) {
   2691 		uint16_t val;
   2692 
   2693 		/* Save the NVM K1 bit setting */
   2694 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2695 
   2696 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2697 			sc->sc_nvm_k1_enabled = 1;
   2698 		else
   2699 			sc->sc_nvm_k1_enabled = 0;
   2700 	}
   2701 
   2702 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2703 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2704 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2705 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2706 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2707 	    || sc->sc_type == WM_T_82573
   2708 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2709 		/* Copper only */
   2710 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2711 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2712 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2713 	    || (sc->sc_type == WM_T_I211)) {
   2714 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2715 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2716 		switch (link_mode) {
   2717 		case CTRL_EXT_LINK_MODE_1000KX:
   2718 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2719 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2720 			break;
   2721 		case CTRL_EXT_LINK_MODE_SGMII:
   2722 			if (wm_sgmii_uses_mdio(sc)) {
   2723 				aprint_verbose_dev(sc->sc_dev,
   2724 				    "SGMII(MDIO)\n");
   2725 				sc->sc_flags |= WM_F_SGMII;
   2726 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2727 				break;
   2728 			}
   2729 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2730 			/*FALLTHROUGH*/
   2731 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2732 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2733 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2734 				if (link_mode
   2735 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2736 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2737 					sc->sc_flags |= WM_F_SGMII;
   2738 				} else {
   2739 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2740 					aprint_verbose_dev(sc->sc_dev,
   2741 					    "SERDES\n");
   2742 				}
   2743 				break;
   2744 			}
   2745 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2746 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2747 
   2748 			/* Change current link mode setting */
   2749 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2750 			switch (sc->sc_mediatype) {
   2751 			case WM_MEDIATYPE_COPPER:
   2752 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2753 				break;
   2754 			case WM_MEDIATYPE_SERDES:
   2755 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2756 				break;
   2757 			default:
   2758 				break;
   2759 			}
   2760 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 			break;
   2762 		case CTRL_EXT_LINK_MODE_GMII:
   2763 		default:
   2764 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2765 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2766 			break;
   2767 		}
   2768 
    2770 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2771 			reg |= CTRL_EXT_I2C_ENA;
    2772 		else
    2773 			reg &= ~CTRL_EXT_I2C_ENA;
   2774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2775 	} else if (sc->sc_type < WM_T_82543 ||
   2776 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2777 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2778 			aprint_error_dev(sc->sc_dev,
   2779 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2780 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2781 		}
   2782 	} else {
   2783 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2784 			aprint_error_dev(sc->sc_dev,
   2785 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2786 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2787 		}
   2788 	}
   2789 
   2790 	if (sc->sc_type >= WM_T_PCH2)
   2791 		sc->sc_flags |= WM_F_EEE;
   2792 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2793 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
    2794 		/* XXX: I354 needs special handling (not yet implemented). */
   2795 		if (sc->sc_type != WM_T_I354)
   2796 			sc->sc_flags |= WM_F_EEE;
   2797 	}
   2798 
   2799 	/* Set device properties (macflags) */
   2800 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2801 
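         	/*
         	 * snprintb(3) renders sc_flags in "%#x<FLAG,...>" form, so the
         	 * line printed below looks something like "0x2<HAS_MII,...>"
         	 * (illustrative only; the actual flags depend on the chip).
         	 */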
   2802 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2803 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2804 
   2805 	/* Initialize the media structures accordingly. */
   2806 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2807 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2808 	else
   2809 		wm_tbi_mediainit(sc); /* All others */
   2810 
   2811 	ifp = &sc->sc_ethercom.ec_if;
   2812 	xname = device_xname(sc->sc_dev);
   2813 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2814 	ifp->if_softc = sc;
   2815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2816 #ifdef WM_MPSAFE
   2817 	ifp->if_extflags = IFEF_MPSAFE;
   2818 #endif
   2819 	ifp->if_ioctl = wm_ioctl;
   2820 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2821 		ifp->if_start = wm_nq_start;
   2822 		/*
   2823 		 * When the number of CPUs is one and the controller can use
    2824 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2825 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2826 		 * other for link status changes.
    2827 		 * In this situation, wm_nq_transmit() is disadvantageous
    2828 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2829 		 */
   2830 		if (wm_is_using_multiqueue(sc))
   2831 			ifp->if_transmit = wm_nq_transmit;
   2832 	} else {
   2833 		ifp->if_start = wm_start;
   2834 		/*
    2835 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2836 		 */
   2837 		if (wm_is_using_multiqueue(sc))
   2838 			ifp->if_transmit = wm_transmit;
   2839 	}
    2840 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2841 	ifp->if_init = wm_init;
   2842 	ifp->if_stop = wm_stop;
   2843 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2844 	IFQ_SET_READY(&ifp->if_snd);
   2845 
   2846 	/* Check for jumbo frame */
   2847 	switch (sc->sc_type) {
   2848 	case WM_T_82573:
   2849 		/* XXX limited to 9234 if ASPM is disabled */
   2850 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2851 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2852 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2853 		break;
   2854 	case WM_T_82571:
   2855 	case WM_T_82572:
   2856 	case WM_T_82574:
   2857 	case WM_T_82583:
   2858 	case WM_T_82575:
   2859 	case WM_T_82576:
   2860 	case WM_T_82580:
   2861 	case WM_T_I350:
   2862 	case WM_T_I354:
   2863 	case WM_T_I210:
   2864 	case WM_T_I211:
   2865 	case WM_T_80003:
   2866 	case WM_T_ICH9:
   2867 	case WM_T_ICH10:
   2868 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2869 	case WM_T_PCH_LPT:
   2870 	case WM_T_PCH_SPT:
   2871 	case WM_T_PCH_CNP:
   2872 		/* XXX limited to 9234 */
   2873 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2874 		break;
   2875 	case WM_T_PCH:
   2876 		/* XXX limited to 4096 */
   2877 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2878 		break;
   2879 	case WM_T_82542_2_0:
   2880 	case WM_T_82542_2_1:
   2881 	case WM_T_ICH8:
   2882 		/* No support for jumbo frame */
   2883 		break;
   2884 	default:
   2885 		/* ETHER_MAX_LEN_JUMBO */
   2886 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2887 		break;
   2888 	}
   2889 
    2890 	/* If we're an i82543 or greater, we can support VLANs. */
   2891 	if (sc->sc_type >= WM_T_82543) {
   2892 		sc->sc_ethercom.ec_capabilities |=
   2893 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2894 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2895 	}
   2896 
   2897 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2898 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2899 
   2900 	/*
    2901 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2902 	 * on i82543 and later.
   2903 	 */
   2904 	if (sc->sc_type >= WM_T_82543) {
   2905 		ifp->if_capabilities |=
   2906 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2907 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2908 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2909 		    IFCAP_CSUM_TCPv6_Tx |
   2910 		    IFCAP_CSUM_UDPv6_Tx;
   2911 	}
   2912 
   2913 	/*
   2914 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2915 	 *
   2916 	 *	82541GI (8086:1076) ... no
   2917 	 *	82572EI (8086:10b9) ... yes
   2918 	 */
   2919 	if (sc->sc_type >= WM_T_82571) {
   2920 		ifp->if_capabilities |=
   2921 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2922 	}
   2923 
   2924 	/*
    2925 	 * If we're an i82544 or greater (except i82547), we can do
   2926 	 * TCP segmentation offload.
   2927 	 */
   2928 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2929 		ifp->if_capabilities |= IFCAP_TSOv4;
   2930 	}
   2931 
   2932 	if (sc->sc_type >= WM_T_82571) {
   2933 		ifp->if_capabilities |= IFCAP_TSOv6;
   2934 	}
   2935 
   2936 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2937 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2939 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2940 
   2941 #ifdef WM_MPSAFE
   2942 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2943 #else
   2944 	sc->sc_core_lock = NULL;
   2945 #endif
   2946 
   2947 	/* Attach the interface. */
   2948 	error = if_initialize(ifp);
   2949 	if (error != 0) {
   2950 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2951 		    error);
   2952 		return; /* Error */
   2953 	}
   2954 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2955 	ether_ifattach(ifp, enaddr);
   2956 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2957 	if_register(ifp);
   2958 
   2959 #ifdef WM_EVENT_COUNTERS
   2960 	/* Attach event counters. */
   2961 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2962 	    NULL, xname, "linkintr");
   2963 
   2964 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2965 	    NULL, xname, "tx_xoff");
   2966 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2967 	    NULL, xname, "tx_xon");
   2968 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2969 	    NULL, xname, "rx_xoff");
   2970 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2971 	    NULL, xname, "rx_xon");
   2972 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2973 	    NULL, xname, "rx_macctl");
   2974 #endif /* WM_EVENT_COUNTERS */
   2975 
   2976 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2977 		pmf_class_network_register(self, ifp);
   2978 	else
   2979 		aprint_error_dev(self, "couldn't establish power handler\n");
   2980 
   2981 	sc->sc_flags |= WM_F_ATTACHED;
   2982 out:
   2983 	return;
   2984 }
   2985 
   2986 /* The detach function (ca_detach) */
   2987 static int
   2988 wm_detach(device_t self, int flags __unused)
   2989 {
   2990 	struct wm_softc *sc = device_private(self);
   2991 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2992 	int i;
   2993 
   2994 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2995 		return 0;
   2996 
   2997 	/* Stop the interface. Callouts are stopped in it. */
    2998 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
   2999 
   3000 	pmf_device_deregister(self);
   3001 
   3002 #ifdef WM_EVENT_COUNTERS
   3003 	evcnt_detach(&sc->sc_ev_linkintr);
   3004 
   3005 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3006 	evcnt_detach(&sc->sc_ev_tx_xon);
   3007 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3008 	evcnt_detach(&sc->sc_ev_rx_xon);
   3009 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3010 #endif /* WM_EVENT_COUNTERS */
   3011 
   3012 	/* Tell the firmware about the release */
   3013 	WM_CORE_LOCK(sc);
   3014 	wm_release_manageability(sc);
   3015 	wm_release_hw_control(sc);
   3016 	wm_enable_wakeup(sc);
   3017 	WM_CORE_UNLOCK(sc);
   3018 
   3019 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3020 
   3021 	/* Delete all remaining media. */
   3022 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3023 
   3024 	ether_ifdetach(ifp);
   3025 	if_detach(ifp);
   3026 	if_percpuq_destroy(sc->sc_ipq);
   3027 
   3028 	/* Unload RX dmamaps and free mbufs */
   3029 	for (i = 0; i < sc->sc_nqueues; i++) {
   3030 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3031 		mutex_enter(rxq->rxq_lock);
   3032 		wm_rxdrain(rxq);
   3033 		mutex_exit(rxq->rxq_lock);
   3034 	}
    3035 	/* The rxq locks must not be held beyond this point. */
   3036 
   3037 	/* Disestablish the interrupt handler */
   3038 	for (i = 0; i < sc->sc_nintrs; i++) {
   3039 		if (sc->sc_ihs[i] != NULL) {
   3040 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3041 			sc->sc_ihs[i] = NULL;
   3042 		}
   3043 	}
   3044 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3045 
   3046 	wm_free_txrx_queues(sc);
   3047 
   3048 	/* Unmap the registers */
   3049 	if (sc->sc_ss) {
   3050 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3051 		sc->sc_ss = 0;
   3052 	}
   3053 	if (sc->sc_ios) {
   3054 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3055 		sc->sc_ios = 0;
   3056 	}
   3057 	if (sc->sc_flashs) {
   3058 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3059 		sc->sc_flashs = 0;
   3060 	}
   3061 
   3062 	if (sc->sc_core_lock)
   3063 		mutex_obj_free(sc->sc_core_lock);
   3064 	if (sc->sc_ich_phymtx)
   3065 		mutex_obj_free(sc->sc_ich_phymtx);
   3066 	if (sc->sc_ich_nvmmtx)
   3067 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3068 
   3069 	return 0;
   3070 }
   3071 
   3072 static bool
   3073 wm_suspend(device_t self, const pmf_qual_t *qual)
   3074 {
   3075 	struct wm_softc *sc = device_private(self);
   3076 
   3077 	wm_release_manageability(sc);
   3078 	wm_release_hw_control(sc);
   3079 	wm_enable_wakeup(sc);
   3080 
   3081 	return true;
   3082 }
   3083 
   3084 static bool
   3085 wm_resume(device_t self, const pmf_qual_t *qual)
   3086 {
   3087 	struct wm_softc *sc = device_private(self);
   3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3089 	pcireg_t reg;
   3090 	char buf[256];
   3091 
   3092 	reg = CSR_READ(sc, WMREG_WUS);
   3093 	if (reg != 0) {
   3094 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3095 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3096 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3097 	}
   3098 
   3099 	if (sc->sc_type >= WM_T_PCH2)
   3100 		wm_resume_workarounds_pchlan(sc);
   3101 	if ((ifp->if_flags & IFF_UP) == 0) {
   3102 		wm_reset(sc);
   3103 		/* Non-AMT based hardware can now take control from firmware */
   3104 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3105 			wm_get_hw_control(sc);
   3106 		wm_init_manageability(sc);
   3107 	} else {
   3108 		/*
   3109 		 * We called pmf_class_network_register(), so if_init() is
   3110 		 * automatically called when IFF_UP. wm_reset(),
   3111 		 * wm_get_hw_control() and wm_init_manageability() are called
   3112 		 * via wm_init().
   3113 		 */
   3114 	}
   3115 
   3116 	return true;
   3117 }
   3118 
   3119 /*
   3120  * wm_watchdog:		[ifnet interface function]
   3121  *
   3122  *	Watchdog timer handler.
   3123  */
   3124 static void
   3125 wm_watchdog(struct ifnet *ifp)
   3126 {
   3127 	int qid;
   3128 	struct wm_softc *sc = ifp->if_softc;
    3129 	uint16_t hang_queue = 0; /* Max number of queues in wm(4) is 16 (82576). */
   3130 
   3131 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3132 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3133 
   3134 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3135 	}
   3136 
    3137 	/* If any queue hung up, reset the interface. */
   3138 	if (hang_queue != 0) {
   3139 		(void)wm_init(ifp);
   3140 
   3141 		/*
    3142 		 * Some upper layer processing still calls ifp->if_start(),
    3143 		 * e.g. ALTQ or single-CPU systems.
   3144 		 */
   3145 		/* Try to get more packets going. */
   3146 		ifp->if_start(ifp);
   3147 	}
   3148 }
   3149 
   3150 
   3151 static void
   3152 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3153 {
   3154 
   3155 	mutex_enter(txq->txq_lock);
   3156 	if (txq->txq_sending &&
   3157 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3158 		wm_watchdog_txq_locked(ifp, txq, hang);
   3159 
   3160 	mutex_exit(txq->txq_lock);
   3161 }
   3162 
   3163 static void
   3164 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3165     uint16_t *hang)
   3166 {
   3167 	struct wm_softc *sc = ifp->if_softc;
   3168 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3169 
   3170 	KASSERT(mutex_owned(txq->txq_lock));
   3171 
   3172 	/*
   3173 	 * Since we're using delayed interrupts, sweep up
   3174 	 * before we report an error.
   3175 	 */
   3176 	wm_txeof(txq, UINT_MAX);
   3177 
   3178 	if (txq->txq_sending)
   3179 		*hang |= __BIT(wmq->wmq_id);
   3180 
   3181 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3182 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3183 		    device_xname(sc->sc_dev));
   3184 	} else {
   3185 #ifdef WM_DEBUG
   3186 		int i, j;
   3187 		struct wm_txsoft *txs;
   3188 #endif
   3189 		log(LOG_ERR,
   3190 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3191 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3192 		    txq->txq_next);
   3193 		ifp->if_oerrors++;
   3194 #ifdef WM_DEBUG
   3195 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3196 		    i = WM_NEXTTXS(txq, i)) {
   3197 			txs = &txq->txq_soft[i];
   3198 			printf("txs %d tx %d -> %d\n",
   3199 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3200 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3201 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3202 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3203 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3204 					printf("\t %#08x%08x\n",
   3205 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3207 				} else {
   3208 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3209 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3210 					    txq->txq_descs[j].wtx_addr.wa_low);
   3211 					printf("\t %#04x%02x%02x%08x\n",
   3212 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3213 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3214 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3215 					    txq->txq_descs[j].wtx_cmdlen);
   3216 				}
   3217 				if (j == txs->txs_lastdesc)
   3218 					break;
   3219 			}
   3220 		}
   3221 #endif
   3222 	}
   3223 }
   3224 
   3225 /*
   3226  * wm_tick:
   3227  *
   3228  *	One second timer, used to check link status, sweep up
   3229  *	completed transmit jobs, etc.
   3230  */
   3231 static void
   3232 wm_tick(void *arg)
   3233 {
   3234 	struct wm_softc *sc = arg;
   3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3236 #ifndef WM_MPSAFE
   3237 	int s = splnet();
   3238 #endif
   3239 
   3240 	WM_CORE_LOCK(sc);
   3241 
   3242 	if (sc->sc_core_stopping) {
   3243 		WM_CORE_UNLOCK(sc);
   3244 #ifndef WM_MPSAFE
   3245 		splx(s);
   3246 #endif
   3247 		return;
   3248 	}
   3249 
   3250 	if (sc->sc_type >= WM_T_82542_2_1) {
   3251 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3252 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3253 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3254 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3256 	}
   3257 
   3258 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3259 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3260 	    + CSR_READ(sc, WMREG_CRCERRS)
   3261 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3262 	    + CSR_READ(sc, WMREG_SYMERRC)
   3263 	    + CSR_READ(sc, WMREG_RXERRC)
   3264 	    + CSR_READ(sc, WMREG_SEC)
   3265 	    + CSR_READ(sc, WMREG_CEXTERR)
   3266 	    + CSR_READ(sc, WMREG_RLEC);
   3267 	/*
    3268 	 * WMREG_RNBC is incremented when there are no available buffers in
    3269 	 * host memory. It is not the number of dropped packets, because the
    3270 	 * ethernet controller can still receive packets in such a case if
    3271 	 * there is space in the PHY's FIFO.
    3272 	 *
    3273 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your own
    3274 	 * instead of if_iqdrops.
   3275 	 */
   3276 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
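         #if 0
         	/*
         	 * Hedged sketch of the dedicated counter suggested above.
         	 * sc_ev_rnbc is hypothetical (not a member of struct wm_softc);
         	 * it would have to be attached with evcnt_attach_dynamic() like
         	 * the other event counters in this driver.
         	 */
         	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         #endif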
   3277 
   3278 	if (sc->sc_flags & WM_F_HAS_MII)
   3279 		mii_tick(&sc->sc_mii);
   3280 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3281 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3282 		wm_serdes_tick(sc);
   3283 	else
   3284 		wm_tbi_tick(sc);
   3285 
   3286 	WM_CORE_UNLOCK(sc);
   3287 
   3288 	wm_watchdog(ifp);
   3289 
   3290 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3291 }
   3292 
   3293 static int
   3294 wm_ifflags_cb(struct ethercom *ec)
   3295 {
   3296 	struct ifnet *ifp = &ec->ec_if;
   3297 	struct wm_softc *sc = ifp->if_softc;
   3298 	int iffchange, ecchange;
   3299 	bool needreset = false;
   3300 	int rc = 0;
   3301 
   3302 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3303 		device_xname(sc->sc_dev), __func__));
   3304 
   3305 	WM_CORE_LOCK(sc);
   3306 
   3307 	/*
   3308 	 * Check for if_flags.
   3309 	 * Main usage is to prevent linkdown when opening bpf.
   3310 	 */
   3311 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3312 	sc->sc_if_flags = ifp->if_flags;
   3313 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3314 		needreset = true;
   3315 		goto ec;
   3316 	}
   3317 
   3318 	/* iff related updates */
   3319 	if ((iffchange & IFF_PROMISC) != 0)
   3320 		wm_set_filter(sc);
   3321 
   3322 	wm_set_vlan(sc);
   3323 
   3324 ec:
   3325 	/* Check for ec_capenable. */
   3326 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3327 	sc->sc_ec_capenable = ec->ec_capenable;
   3328 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3329 		needreset = true;
   3330 		goto out;
   3331 	}
   3332 
   3333 	/* ec related updates */
   3334 	wm_set_eee(sc);
   3335 
   3336 out:
   3337 	if (needreset)
   3338 		rc = ENETRESET;
   3339 	WM_CORE_UNLOCK(sc);
   3340 
   3341 	return rc;
   3342 }
   3343 
   3344 /*
   3345  * wm_ioctl:		[ifnet interface function]
   3346  *
   3347  *	Handle control requests from the operator.
   3348  */
   3349 static int
   3350 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3351 {
   3352 	struct wm_softc *sc = ifp->if_softc;
   3353 	struct ifreq *ifr = (struct ifreq *)data;
   3354 	struct ifaddr *ifa = (struct ifaddr *)data;
   3355 	struct sockaddr_dl *sdl;
   3356 	int s, error;
   3357 
   3358 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3359 		device_xname(sc->sc_dev), __func__));
   3360 
   3361 #ifndef WM_MPSAFE
   3362 	s = splnet();
   3363 #endif
   3364 	switch (cmd) {
   3365 	case SIOCSIFMEDIA:
   3366 		WM_CORE_LOCK(sc);
   3367 		/* Flow control requires full-duplex mode. */
   3368 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3369 		    (ifr->ifr_media & IFM_FDX) == 0)
   3370 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3371 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3372 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3373 				/* We can do both TXPAUSE and RXPAUSE. */
   3374 				ifr->ifr_media |=
   3375 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3376 			}
   3377 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3378 		}
   3379 		WM_CORE_UNLOCK(sc);
   3380 #ifdef WM_MPSAFE
   3381 		s = splnet();
   3382 #endif
   3383 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3384 #ifdef WM_MPSAFE
   3385 		splx(s);
   3386 #endif
   3387 		break;
   3388 	case SIOCINITIFADDR:
   3389 		WM_CORE_LOCK(sc);
   3390 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3391 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3392 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3393 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3394 			/* Unicast address is the first multicast entry */
   3395 			wm_set_filter(sc);
   3396 			error = 0;
   3397 			WM_CORE_UNLOCK(sc);
   3398 			break;
   3399 		}
   3400 		WM_CORE_UNLOCK(sc);
   3401 		/*FALLTHROUGH*/
   3402 	default:
   3403 #ifdef WM_MPSAFE
   3404 		s = splnet();
   3405 #endif
    3406 		/* ether_ioctl() may call wm_start(), so it is called unlocked */
   3407 		error = ether_ioctl(ifp, cmd, data);
   3408 #ifdef WM_MPSAFE
   3409 		splx(s);
   3410 #endif
   3411 		if (error != ENETRESET)
   3412 			break;
   3413 
   3414 		error = 0;
   3415 
   3416 		if (cmd == SIOCSIFCAP)
   3417 			error = (*ifp->if_init)(ifp);
   3418 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3419 			;
   3420 		else if (ifp->if_flags & IFF_RUNNING) {
   3421 			/*
   3422 			 * Multicast list has changed; set the hardware filter
   3423 			 * accordingly.
   3424 			 */
   3425 			WM_CORE_LOCK(sc);
   3426 			wm_set_filter(sc);
   3427 			WM_CORE_UNLOCK(sc);
   3428 		}
   3429 		break;
   3430 	}
   3431 
   3432 #ifndef WM_MPSAFE
   3433 	splx(s);
   3434 #endif
   3435 	return error;
   3436 }
   3437 
   3438 /* MAC address related */
   3439 
   3440 /*
    3441  * Get the offset of the MAC address and return it.
    3442  * If an error occurs, use offset 0.
   3443  */
   3444 static uint16_t
   3445 wm_check_alt_mac_addr(struct wm_softc *sc)
   3446 {
   3447 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3448 	uint16_t offset = NVM_OFF_MACADDR;
   3449 
   3450 	/* Try to read alternative MAC address pointer */
   3451 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3452 		return 0;
   3453 
    3454 	/* Check whether the pointer is valid. */
   3455 	if ((offset == 0x0000) || (offset == 0xffff))
   3456 		return 0;
   3457 
   3458 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3459 	/*
    3460 	 * Check whether the alternative MAC address is valid.
    3461 	 * Some cards have a non-0xffff pointer but don't actually use
    3462 	 * an alternative MAC address.
    3463 	 *
    3464 	 * To do so, check that the multicast (broadcast) bit is clear.
   3465 	 */
   3466 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3467 		if (((myea[0] & 0xff) & 0x01) == 0)
   3468 			return offset; /* Found */
   3469 
   3470 	/* Not found */
   3471 	return 0;
   3472 }
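         /*
          * Illustrative layout note: each function's alternative MAC address
          * occupies ETHER_ADDR_LEN / 2 = 3 NVM words, so (assuming
          * NVM_OFF_MACADDR_82571(funcid) expands to funcid * 3) function 1's
          * address begins 3 words past the location the pointer names.
          */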
   3473 
   3474 static int
   3475 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3476 {
   3477 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3478 	uint16_t offset = NVM_OFF_MACADDR;
   3479 	int do_invert = 0;
   3480 
   3481 	switch (sc->sc_type) {
   3482 	case WM_T_82580:
   3483 	case WM_T_I350:
   3484 	case WM_T_I354:
   3485 		/* EEPROM Top Level Partitioning */
   3486 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3487 		break;
   3488 	case WM_T_82571:
   3489 	case WM_T_82575:
   3490 	case WM_T_82576:
   3491 	case WM_T_80003:
   3492 	case WM_T_I210:
   3493 	case WM_T_I211:
   3494 		offset = wm_check_alt_mac_addr(sc);
   3495 		if (offset == 0)
   3496 			if ((sc->sc_funcid & 0x01) == 1)
   3497 				do_invert = 1;
   3498 		break;
   3499 	default:
   3500 		if ((sc->sc_funcid & 0x01) == 1)
   3501 			do_invert = 1;
   3502 		break;
   3503 	}
   3504 
   3505 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3506 		goto bad;
   3507 
   3508 	enaddr[0] = myea[0] & 0xff;
   3509 	enaddr[1] = myea[0] >> 8;
   3510 	enaddr[2] = myea[1] & 0xff;
   3511 	enaddr[3] = myea[1] >> 8;
   3512 	enaddr[4] = myea[2] & 0xff;
   3513 	enaddr[5] = myea[2] >> 8;
   3514 
   3515 	/*
   3516 	 * Toggle the LSB of the MAC address on the second port
   3517 	 * of some dual port cards.
   3518 	 */
   3519 	if (do_invert != 0)
   3520 		enaddr[5] ^= 1;
   3521 
   3522 	return 0;
   3523 
   3524  bad:
   3525 	return -1;
   3526 }
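         /*
          * Worked example (illustrative values only): if the three NVM words
          * read above are myea[] = { 0x1100, 0x3322, 0x5544 }, the low byte
          * of each word comes first and the address is 00:11:22:33:44:55;
          * with do_invert set it becomes 00:11:22:33:44:54.
          */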
   3527 
   3528 /*
   3529  * wm_set_ral:
   3530  *
    3531  *	Set an entry in the receive address list.
   3532  */
   3533 static void
   3534 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3535 {
   3536 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3537 	uint32_t wlock_mac;
   3538 	int rv;
   3539 
   3540 	if (enaddr != NULL) {
   3541 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3542 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3543 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3544 		ral_hi |= RAL_AV;
   3545 	} else {
   3546 		ral_lo = 0;
   3547 		ral_hi = 0;
   3548 	}
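         	/*
         	 * Packing example (illustrative): for 00:11:22:33:44:55 the
         	 * words built above are ral_lo = 0x33221100 and
         	 * ral_hi = 0x00005544 | RAL_AV.
         	 */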
   3549 
   3550 	switch (sc->sc_type) {
   3551 	case WM_T_82542_2_0:
   3552 	case WM_T_82542_2_1:
   3553 	case WM_T_82543:
   3554 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3555 		CSR_WRITE_FLUSH(sc);
   3556 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3557 		CSR_WRITE_FLUSH(sc);
   3558 		break;
   3559 	case WM_T_PCH2:
   3560 	case WM_T_PCH_LPT:
   3561 	case WM_T_PCH_SPT:
   3562 	case WM_T_PCH_CNP:
   3563 		if (idx == 0) {
   3564 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3565 			CSR_WRITE_FLUSH(sc);
   3566 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3567 			CSR_WRITE_FLUSH(sc);
   3568 			return;
   3569 		}
   3570 		if (sc->sc_type != WM_T_PCH2) {
   3571 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3572 			    FWSM_WLOCK_MAC);
   3573 			addrl = WMREG_SHRAL(idx - 1);
   3574 			addrh = WMREG_SHRAH(idx - 1);
   3575 		} else {
   3576 			wlock_mac = 0;
   3577 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3578 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3579 		}
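         		/*
         		 * FWSM_WLOCK_MAC == 0 means every shared receive address
         		 * entry is writable; otherwise software may only write
         		 * RAR[0] through RAR[wlock_mac], as checked below.
         		 */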
   3580 
   3581 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3582 			rv = wm_get_swflag_ich8lan(sc);
   3583 			if (rv != 0)
   3584 				return;
   3585 			CSR_WRITE(sc, addrl, ral_lo);
   3586 			CSR_WRITE_FLUSH(sc);
   3587 			CSR_WRITE(sc, addrh, ral_hi);
   3588 			CSR_WRITE_FLUSH(sc);
   3589 			wm_put_swflag_ich8lan(sc);
   3590 		}
   3591 
   3592 		break;
   3593 	default:
   3594 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3595 		CSR_WRITE_FLUSH(sc);
   3596 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3597 		CSR_WRITE_FLUSH(sc);
   3598 		break;
   3599 	}
   3600 }
   3601 
   3602 /*
   3603  * wm_mchash:
   3604  *
   3605  *	Compute the hash of the multicast address for the 4096-bit
   3606  *	multicast filter.
   3607  */
   3608 static uint32_t
   3609 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3610 {
   3611 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3612 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3613 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3614 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3615 	uint32_t hash;
   3616 
   3617 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3618 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3619 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3620 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3621 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3622 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3623 		return (hash & 0x3ff);
   3624 	}
   3625 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3626 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3627 
   3628 	return (hash & 0xfff);
   3629 }
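         /*
          * Worked example (illustrative, non-ICH chip, sc_mchash_type == 0):
          * for an address whose last two bytes are 0x12 and 0x34,
          * hash = (0x12 >> 4) | (0x34 << 4) = 0x341.  wm_set_filter() then
          * sets bit (0x341 & 0x1f) = 1 in multicast table word
          * (0x341 >> 5) = 26.
          */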
   3630 
   3631 /*
    3632  * wm_rar_count:
    3633  *	Return the number of entries in the receive address list.
   3634  */
   3635 static int
   3636 wm_rar_count(struct wm_softc *sc)
   3637 {
   3638 	int size;
   3639 
   3640 	switch (sc->sc_type) {
   3641 	case WM_T_ICH8:
    3642 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3643 		break;
   3644 	case WM_T_ICH9:
   3645 	case WM_T_ICH10:
   3646 	case WM_T_PCH:
   3647 		size = WM_RAL_TABSIZE_ICH8;
   3648 		break;
   3649 	case WM_T_PCH2:
   3650 		size = WM_RAL_TABSIZE_PCH2;
   3651 		break;
   3652 	case WM_T_PCH_LPT:
   3653 	case WM_T_PCH_SPT:
   3654 	case WM_T_PCH_CNP:
   3655 		size = WM_RAL_TABSIZE_PCH_LPT;
   3656 		break;
   3657 	case WM_T_82575:
   3658 	case WM_T_I210:
   3659 	case WM_T_I211:
   3660 		size = WM_RAL_TABSIZE_82575;
   3661 		break;
   3662 	case WM_T_82576:
   3663 	case WM_T_82580:
   3664 		size = WM_RAL_TABSIZE_82576;
   3665 		break;
   3666 	case WM_T_I350:
   3667 	case WM_T_I354:
   3668 		size = WM_RAL_TABSIZE_I350;
   3669 		break;
   3670 	default:
   3671 		size = WM_RAL_TABSIZE;
   3672 	}
   3673 
   3674 	return size;
   3675 }
   3676 
   3677 /*
   3678  * wm_set_filter:
   3679  *
   3680  *	Set up the receive filter.
   3681  */
   3682 static void
   3683 wm_set_filter(struct wm_softc *sc)
   3684 {
   3685 	struct ethercom *ec = &sc->sc_ethercom;
   3686 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3687 	struct ether_multi *enm;
   3688 	struct ether_multistep step;
   3689 	bus_addr_t mta_reg;
   3690 	uint32_t hash, reg, bit;
   3691 	int i, size, ralmax;
   3692 
   3693 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3694 		device_xname(sc->sc_dev), __func__));
   3695 
   3696 	if (sc->sc_type >= WM_T_82544)
   3697 		mta_reg = WMREG_CORDOVA_MTA;
   3698 	else
   3699 		mta_reg = WMREG_MTA;
   3700 
   3701 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3702 
   3703 	if (ifp->if_flags & IFF_BROADCAST)
   3704 		sc->sc_rctl |= RCTL_BAM;
   3705 	if (ifp->if_flags & IFF_PROMISC) {
   3706 		sc->sc_rctl |= RCTL_UPE;
   3707 		ETHER_LOCK(ec);
   3708 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3709 		ETHER_UNLOCK(ec);
   3710 		goto allmulti;
   3711 	}
   3712 
   3713 	/*
   3714 	 * Set the station address in the first RAL slot, and
   3715 	 * clear the remaining slots.
   3716 	 */
   3717 	size = wm_rar_count(sc);
   3718 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3719 
   3720 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3721 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3722 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3723 		switch (i) {
   3724 		case 0:
   3725 			/* We can use all entries */
   3726 			ralmax = size;
   3727 			break;
   3728 		case 1:
   3729 			/* Only RAR[0] */
   3730 			ralmax = 1;
   3731 			break;
   3732 		default:
   3733 			/* Available SHRA + RAR[0] */
   3734 			ralmax = i + 1;
   3735 		}
   3736 	} else
   3737 		ralmax = size;
   3738 	for (i = 1; i < size; i++) {
   3739 		if (i < ralmax)
   3740 			wm_set_ral(sc, NULL, i);
   3741 	}
   3742 
   3743 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3744 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3745 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3746 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3747 		size = WM_ICH8_MC_TABSIZE;
   3748 	else
   3749 		size = WM_MC_TABSIZE;
   3750 	/* Clear out the multicast table. */
   3751 	for (i = 0; i < size; i++) {
   3752 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3753 		CSR_WRITE_FLUSH(sc);
   3754 	}
   3755 
   3756 	ETHER_LOCK(ec);
   3757 	ETHER_FIRST_MULTI(step, ec, enm);
   3758 	while (enm != NULL) {
   3759 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3760 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3761 			ETHER_UNLOCK(ec);
   3762 			/*
   3763 			 * We must listen to a range of multicast addresses.
   3764 			 * For now, just accept all multicasts, rather than
   3765 			 * trying to set only those filter bits needed to match
   3766 			 * the range.  (At this time, the only use of address
   3767 			 * ranges is for IP multicast routing, for which the
   3768 			 * range is big enough to require all bits set.)
   3769 			 */
   3770 			goto allmulti;
   3771 		}
   3772 
   3773 		hash = wm_mchash(sc, enm->enm_addrlo);
   3774 
   3775 		reg = (hash >> 5);
   3776 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3777 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3778 		    || (sc->sc_type == WM_T_PCH2)
   3779 		    || (sc->sc_type == WM_T_PCH_LPT)
   3780 		    || (sc->sc_type == WM_T_PCH_SPT)
   3781 		    || (sc->sc_type == WM_T_PCH_CNP))
   3782 			reg &= 0x1f;
   3783 		else
   3784 			reg &= 0x7f;
   3785 		bit = hash & 0x1f;
   3786 
   3787 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3788 		hash |= 1U << bit;
   3789 
   3790 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3791 			/*
   3792 			 * 82544 Errata 9: Certain register cannot be written
   3793 			 * with particular alignments in PCI-X bus operation
   3794 			 * (FCAH, MTA and VFTA).
   3795 			 */
   3796 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3797 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3798 			CSR_WRITE_FLUSH(sc);
   3799 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3800 			CSR_WRITE_FLUSH(sc);
   3801 		} else {
   3802 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 		}
   3805 
   3806 		ETHER_NEXT_MULTI(step, enm);
   3807 	}
   3808 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3809 	ETHER_UNLOCK(ec);
   3810 
   3811 	goto setit;
   3812 
   3813  allmulti:
   3814 	sc->sc_rctl |= RCTL_MPE;
   3815 
   3816  setit:
   3817 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3818 }
   3819 
   3820 /* Reset and init related */
   3821 
   3822 static void
   3823 wm_set_vlan(struct wm_softc *sc)
   3824 {
   3825 
   3826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3827 		device_xname(sc->sc_dev), __func__));
   3828 
   3829 	/* Deal with VLAN enables. */
   3830 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3831 		sc->sc_ctrl |= CTRL_VME;
   3832 	else
   3833 		sc->sc_ctrl &= ~CTRL_VME;
   3834 
   3835 	/* Write the control registers. */
   3836 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3837 }
   3838 
   3839 static void
   3840 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3841 {
   3842 	uint32_t gcr;
   3843 	pcireg_t ctrl2;
   3844 
   3845 	gcr = CSR_READ(sc, WMREG_GCR);
   3846 
    3847 	/* Only take action if the timeout value is still the default of 0 */
   3848 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3849 		goto out;
   3850 
   3851 	if ((gcr & GCR_CAP_VER2) == 0) {
   3852 		gcr |= GCR_CMPL_TMOUT_10MS;
   3853 		goto out;
   3854 	}
   3855 
   3856 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3857 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3858 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3859 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3860 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3861 
   3862 out:
   3863 	/* Disable completion timeout resend */
   3864 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3865 
   3866 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3867 }
   3868 
   3869 void
   3870 wm_get_auto_rd_done(struct wm_softc *sc)
   3871 {
   3872 	int i;
   3873 
    3874 	/* Wait for eeprom to reload */
   3875 	switch (sc->sc_type) {
   3876 	case WM_T_82571:
   3877 	case WM_T_82572:
   3878 	case WM_T_82573:
   3879 	case WM_T_82574:
   3880 	case WM_T_82583:
   3881 	case WM_T_82575:
   3882 	case WM_T_82576:
   3883 	case WM_T_82580:
   3884 	case WM_T_I350:
   3885 	case WM_T_I354:
   3886 	case WM_T_I210:
   3887 	case WM_T_I211:
   3888 	case WM_T_80003:
   3889 	case WM_T_ICH8:
   3890 	case WM_T_ICH9:
   3891 		for (i = 0; i < 10; i++) {
   3892 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3893 				break;
   3894 			delay(1000);
   3895 		}
   3896 		if (i == 10) {
   3897 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3898 			    "complete\n", device_xname(sc->sc_dev));
   3899 		}
   3900 		break;
   3901 	default:
   3902 		break;
   3903 	}
   3904 }
   3905 
   3906 void
   3907 wm_lan_init_done(struct wm_softc *sc)
   3908 {
   3909 	uint32_t reg = 0;
   3910 	int i;
   3911 
   3912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3913 		device_xname(sc->sc_dev), __func__));
   3914 
   3915 	/* Wait for eeprom to reload */
   3916 	switch (sc->sc_type) {
   3917 	case WM_T_ICH10:
   3918 	case WM_T_PCH:
   3919 	case WM_T_PCH2:
   3920 	case WM_T_PCH_LPT:
   3921 	case WM_T_PCH_SPT:
   3922 	case WM_T_PCH_CNP:
   3923 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3924 			reg = CSR_READ(sc, WMREG_STATUS);
   3925 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3926 				break;
   3927 			delay(100);
   3928 		}
   3929 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3930 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3931 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3932 		}
   3933 		break;
   3934 	default:
   3935 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3936 		    __func__);
   3937 		break;
   3938 	}
   3939 
   3940 	reg &= ~STATUS_LAN_INIT_DONE;
   3941 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3942 }
   3943 
   3944 void
   3945 wm_get_cfg_done(struct wm_softc *sc)
   3946 {
   3947 	int mask;
   3948 	uint32_t reg;
   3949 	int i;
   3950 
   3951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3952 		device_xname(sc->sc_dev), __func__));
   3953 
   3954 	/* Wait for eeprom to reload */
   3955 	switch (sc->sc_type) {
   3956 	case WM_T_82542_2_0:
   3957 	case WM_T_82542_2_1:
   3958 		/* null */
   3959 		break;
   3960 	case WM_T_82543:
   3961 	case WM_T_82544:
   3962 	case WM_T_82540:
   3963 	case WM_T_82545:
   3964 	case WM_T_82545_3:
   3965 	case WM_T_82546:
   3966 	case WM_T_82546_3:
   3967 	case WM_T_82541:
   3968 	case WM_T_82541_2:
   3969 	case WM_T_82547:
   3970 	case WM_T_82547_2:
   3971 	case WM_T_82573:
   3972 	case WM_T_82574:
   3973 	case WM_T_82583:
   3974 		/* generic */
   3975 		delay(10*1000);
   3976 		break;
   3977 	case WM_T_80003:
   3978 	case WM_T_82571:
   3979 	case WM_T_82572:
   3980 	case WM_T_82575:
   3981 	case WM_T_82576:
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 	case WM_T_I210:
   3986 	case WM_T_I211:
   3987 		if (sc->sc_type == WM_T_82571) {
   3988 			/* Only 82571 shares port 0 */
   3989 			mask = EEMNGCTL_CFGDONE_0;
   3990 		} else
   3991 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3992 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3993 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3994 				break;
   3995 			delay(1000);
   3996 		}
   3997 		if (i >= WM_PHY_CFG_TIMEOUT)
   3998 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3999 				device_xname(sc->sc_dev), __func__));
   4000 		break;
   4001 	case WM_T_ICH8:
   4002 	case WM_T_ICH9:
   4003 	case WM_T_ICH10:
   4004 	case WM_T_PCH:
   4005 	case WM_T_PCH2:
   4006 	case WM_T_PCH_LPT:
   4007 	case WM_T_PCH_SPT:
   4008 	case WM_T_PCH_CNP:
   4009 		delay(10*1000);
   4010 		if (sc->sc_type >= WM_T_ICH10)
   4011 			wm_lan_init_done(sc);
   4012 		else
   4013 			wm_get_auto_rd_done(sc);
   4014 
   4015 		/* Clear PHY Reset Asserted bit */
   4016 		reg = CSR_READ(sc, WMREG_STATUS);
   4017 		if ((reg & STATUS_PHYRA) != 0)
   4018 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4019 		break;
   4020 	default:
   4021 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4022 		    __func__);
   4023 		break;
   4024 	}
   4025 }
   4026 
   4027 int
   4028 wm_phy_post_reset(struct wm_softc *sc)
   4029 {
   4030 	device_t dev = sc->sc_dev;
   4031 	uint16_t reg;
   4032 	int rv = 0;
   4033 
   4034 	/* This function is only for ICH8 and newer. */
   4035 	if (sc->sc_type < WM_T_ICH8)
   4036 		return 0;
   4037 
   4038 	if (wm_phy_resetisblocked(sc)) {
   4039 		/* XXX */
   4040 		device_printf(dev, "PHY is blocked\n");
   4041 		return -1;
   4042 	}
   4043 
   4044 	/* Allow time for h/w to get to quiescent state after reset */
   4045 	delay(10*1000);
   4046 
   4047 	/* Perform any necessary post-reset workarounds */
   4048 	if (sc->sc_type == WM_T_PCH)
   4049 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4050 	else if (sc->sc_type == WM_T_PCH2)
   4051 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4052 	if (rv != 0)
   4053 		return rv;
   4054 
   4055 	/* Clear the host wakeup bit after lcd reset */
   4056 	if (sc->sc_type >= WM_T_PCH) {
   4057 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4058 		reg &= ~BM_WUC_HOST_WU_BIT;
   4059 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4060 	}
   4061 
   4062 	/* Configure the LCD with the extended configuration region in NVM */
   4063 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4064 		return rv;
   4065 
   4066 	/* Configure the LCD with the OEM bits in NVM */
   4067 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4068 
   4069 	if (sc->sc_type == WM_T_PCH2) {
   4070 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4071 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4072 			delay(10 * 1000);
   4073 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4074 		}
   4075 		/* Set EEE LPI Update Timer to 200usec */
   4076 		rv = sc->phy.acquire(sc);
   4077 		if (rv)
   4078 			return rv;
   4079 		rv = wm_write_emi_reg_locked(dev,
   4080 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4081 		sc->phy.release(sc);
   4082 	}
   4083 
   4084 	return rv;
   4085 }
   4086 
   4087 /* Only for PCH and newer */
   4088 static int
   4089 wm_write_smbus_addr(struct wm_softc *sc)
   4090 {
   4091 	uint32_t strap, freq;
   4092 	uint16_t phy_data;
   4093 	int rv;
   4094 
   4095 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4096 		device_xname(sc->sc_dev), __func__));
   4097 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4098 
   4099 	strap = CSR_READ(sc, WMREG_STRAP);
   4100 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4101 
   4102 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4103 	if (rv != 0)
   4104 		return -1;
   4105 
   4106 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4107 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4108 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4109 
   4110 	if (sc->sc_phytype == WMPHY_I217) {
   4111 		/* Restore SMBus frequency */
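         		/*
         		 * The two-bit frequency field is encoded as the strap
         		 * value minus one; a strap value of 0 means no valid
         		 * frequency was strapped, hence the "if (freq--)" below.
         		 */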
    4112 		if (freq--) {
   4113 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4114 			    | HV_SMB_ADDR_FREQ_HIGH);
   4115 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4116 			    HV_SMB_ADDR_FREQ_LOW);
   4117 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4118 			    HV_SMB_ADDR_FREQ_HIGH);
   4119 		} else
   4120 			DPRINTF(WM_DEBUG_INIT,
   4121 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4122 				device_xname(sc->sc_dev), __func__));
   4123 	}
   4124 
   4125 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4126 	    phy_data);
   4127 }
   4128 
   4129 static int
   4130 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4131 {
   4132 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4133 	uint16_t phy_page = 0;
   4134 	int rv = 0;
   4135 
   4136 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4137 		device_xname(sc->sc_dev), __func__));
   4138 
   4139 	switch (sc->sc_type) {
   4140 	case WM_T_ICH8:
   4141 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4142 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4143 			return 0;
   4144 
   4145 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4146 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4147 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4148 			break;
   4149 		}
   4150 		/* FALLTHROUGH */
   4151 	case WM_T_PCH:
   4152 	case WM_T_PCH2:
   4153 	case WM_T_PCH_LPT:
   4154 	case WM_T_PCH_SPT:
   4155 	case WM_T_PCH_CNP:
   4156 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4157 		break;
   4158 	default:
   4159 		return 0;
   4160 	}
   4161 
   4162 	if ((rv = sc->phy.acquire(sc)) != 0)
   4163 		return rv;
   4164 
   4165 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4166 	if ((reg & sw_cfg_mask) == 0)
   4167 		goto release;
   4168 
   4169 	/*
   4170 	 * Make sure HW does not configure LCD from PHY extended configuration
   4171 	 * before SW configuration
   4172 	 */
   4173 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4174 	if ((sc->sc_type < WM_T_PCH2)
   4175 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4176 		goto release;
   4177 
   4178 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4179 		device_xname(sc->sc_dev), __func__));
    4180 	/* The extended config pointer is in doublewords; convert to words. */
   4181 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4182 
   4183 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4184 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4185 	if (cnf_size == 0)
   4186 		goto release;
   4187 
   4188 	if (((sc->sc_type == WM_T_PCH)
   4189 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4190 	    || (sc->sc_type > WM_T_PCH)) {
   4191 		/*
   4192 		 * HW configures the SMBus address and LEDs when the OEM and
   4193 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4194 		 * are cleared, SW will configure them instead.
   4195 		 */
   4196 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4197 			device_xname(sc->sc_dev), __func__));
   4198 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4199 			goto release;
   4200 
   4201 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4202 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4203 		    (uint16_t)reg);
   4204 		if (rv != 0)
   4205 			goto release;
   4206 	}
   4207 
   4208 	/* Configure LCD from extended configuration region. */
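         	/*
         	 * Each entry in the region is a pair of NVM words: the register
         	 * data followed by the register address.  MII_IGPHY_PAGE_SELECT
         	 * entries update the page applied to subsequent writes.
         	 */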
   4209 	for (i = 0; i < cnf_size; i++) {
   4210 		uint16_t reg_data, reg_addr;
   4211 
   4212 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4213 			goto release;
   4214 
    4215 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4216 			goto release;
   4217 
   4218 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4219 			phy_page = reg_data;
   4220 
   4221 		reg_addr &= IGPHY_MAXREGADDR;
   4222 		reg_addr |= phy_page;
   4223 
   4224 		KASSERT(sc->phy.writereg_locked != NULL);
   4225 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4226 		    reg_data);
   4227 	}
   4228 
   4229 release:
   4230 	sc->phy.release(sc);
   4231 	return rv;
   4232 }
   4233 
   4234 /*
   4235  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4236  *  @sc:       pointer to the HW structure
   4237  *  @d0_state: boolean if entering d0 or d3 device state
   4238  *
   4239  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4240  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4241  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4242  */
   4243 int
   4244 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4245 {
   4246 	uint32_t mac_reg;
   4247 	uint16_t oem_reg;
   4248 	int rv;
   4249 
   4250 	if (sc->sc_type < WM_T_PCH)
   4251 		return 0;
   4252 
   4253 	rv = sc->phy.acquire(sc);
   4254 	if (rv != 0)
   4255 		return rv;
   4256 
   4257 	if (sc->sc_type == WM_T_PCH) {
   4258 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4259 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4260 			goto release;
   4261 	}
   4262 
   4263 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4264 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4265 		goto release;
   4266 
   4267 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4268 
   4269 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4270 	if (rv != 0)
   4271 		goto release;
   4272 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4273 
   4274 	if (d0_state) {
   4275 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4276 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4277 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4278 			oem_reg |= HV_OEM_BITS_LPLU;
   4279 	} else {
   4280 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4281 		    != 0)
   4282 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4283 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4284 		    != 0)
   4285 			oem_reg |= HV_OEM_BITS_LPLU;
   4286 	}
   4287 
   4288 	/* Set Restart auto-neg to activate the bits */
   4289 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4290 	    && (wm_phy_resetisblocked(sc) == false))
   4291 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4292 
   4293 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4294 
   4295 release:
   4296 	sc->phy.release(sc);
   4297 
   4298 	return rv;
   4299 }
   4300 
   4301 /* Init hardware bits */
   4302 void
   4303 wm_initialize_hardware_bits(struct wm_softc *sc)
   4304 {
   4305 	uint32_t tarc0, tarc1, reg;
   4306 
   4307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4308 		device_xname(sc->sc_dev), __func__));
   4309 
   4310 	/* For 82571 variant, 80003 and ICHs */
   4311 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4312 	    || (sc->sc_type >= WM_T_80003)) {
   4313 
   4314 		/* Transmit Descriptor Control 0 */
   4315 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4316 		reg |= TXDCTL_COUNT_DESC;
   4317 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4318 
   4319 		/* Transmit Descriptor Control 1 */
   4320 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4321 		reg |= TXDCTL_COUNT_DESC;
   4322 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4323 
   4324 		/* TARC0 */
   4325 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4326 		switch (sc->sc_type) {
   4327 		case WM_T_82571:
   4328 		case WM_T_82572:
   4329 		case WM_T_82573:
   4330 		case WM_T_82574:
   4331 		case WM_T_82583:
   4332 		case WM_T_80003:
   4333 			/* Clear bits 30..27 */
   4334 			tarc0 &= ~__BITS(30, 27);
   4335 			break;
   4336 		default:
   4337 			break;
   4338 		}
   4339 
   4340 		switch (sc->sc_type) {
   4341 		case WM_T_82571:
   4342 		case WM_T_82572:
   4343 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4344 
   4345 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4346 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4347 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4348 			/* 8257[12] Errata No.7 */
    4349 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4350 
   4351 			/* TARC1 bit 28 */
   4352 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4353 				tarc1 &= ~__BIT(28);
   4354 			else
   4355 				tarc1 |= __BIT(28);
   4356 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4357 
   4358 			/*
   4359 			 * 8257[12] Errata No.13
    4360 			 * Disable Dynamic Clock Gating.
   4361 			 */
   4362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4363 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4364 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4365 			break;
   4366 		case WM_T_82573:
   4367 		case WM_T_82574:
   4368 		case WM_T_82583:
   4369 			if ((sc->sc_type == WM_T_82574)
   4370 			    || (sc->sc_type == WM_T_82583))
   4371 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4372 
   4373 			/* Extended Device Control */
   4374 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4375 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4376 			reg |= __BIT(22);	/* Set bit 22 */
   4377 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4378 
   4379 			/* Device Control */
   4380 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4381 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4382 
   4383 			/* PCIe Control Register */
   4384 			/*
   4385 			 * 82573 Errata (unknown).
   4386 			 *
   4387 			 * 82574 Errata 25 and 82583 Errata 12
   4388 			 * "Dropped Rx Packets":
    4389 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4390 			 */
   4391 			reg = CSR_READ(sc, WMREG_GCR);
   4392 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4393 			CSR_WRITE(sc, WMREG_GCR, reg);
   4394 
   4395 			if ((sc->sc_type == WM_T_82574)
   4396 			    || (sc->sc_type == WM_T_82583)) {
   4397 				/*
   4398 				 * Document says this bit must be set for
   4399 				 * proper operation.
   4400 				 */
   4401 				reg = CSR_READ(sc, WMREG_GCR);
   4402 				reg |= __BIT(22);
   4403 				CSR_WRITE(sc, WMREG_GCR, reg);
   4404 
   4405 				/*
    4406 				 * Apply a workaround for the hardware erratum
    4407 				 * documented in the errata docs. It fixes an
    4408 				 * issue where some error-prone or unreliable
    4409 				 * PCIe completions occur, particularly with
    4410 				 * ASPM enabled. Without the fix, the issue
    4411 				 * can cause Tx timeouts.
   4412 				 */
   4413 				reg = CSR_READ(sc, WMREG_GCR2);
   4414 				reg |= __BIT(0);
   4415 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4416 			}
   4417 			break;
   4418 		case WM_T_80003:
   4419 			/* TARC0 */
   4420 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4421 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4422 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4423 
   4424 			/* TARC1 bit 28 */
   4425 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4426 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4427 				tarc1 &= ~__BIT(28);
   4428 			else
   4429 				tarc1 |= __BIT(28);
   4430 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4431 			break;
   4432 		case WM_T_ICH8:
   4433 		case WM_T_ICH9:
   4434 		case WM_T_ICH10:
   4435 		case WM_T_PCH:
   4436 		case WM_T_PCH2:
   4437 		case WM_T_PCH_LPT:
   4438 		case WM_T_PCH_SPT:
   4439 		case WM_T_PCH_CNP:
   4440 			/* TARC0 */
   4441 			if (sc->sc_type == WM_T_ICH8) {
   4442 				/* Set TARC0 bits 29 and 28 */
   4443 				tarc0 |= __BITS(29, 28);
   4444 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4445 				tarc0 |= __BIT(29);
   4446 				/*
    4447 				 * Drop bit 28. From Linux. See the
    4448 				 * I218/I219 spec update,
   4449 				 * "5. Buffer Overrun While the I219 is
   4450 				 * Processing DMA Transactions"
   4451 				 */
   4452 				tarc0 &= ~__BIT(28);
   4453 			}
   4454 			/* Set TARC0 bits 23,24,26,27 */
   4455 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4456 
   4457 			/* CTRL_EXT */
   4458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4459 			reg |= __BIT(22);	/* Set bit 22 */
   4460 			/*
   4461 			 * Enable PHY low-power state when MAC is at D3
   4462 			 * w/o WoL
   4463 			 */
   4464 			if (sc->sc_type >= WM_T_PCH)
   4465 				reg |= CTRL_EXT_PHYPDEN;
   4466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4467 
   4468 			/* TARC1 */
   4469 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4470 			/* bit 28 */
   4471 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4472 				tarc1 &= ~__BIT(28);
   4473 			else
   4474 				tarc1 |= __BIT(28);
   4475 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4476 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4477 
   4478 			/* Device Status */
   4479 			if (sc->sc_type == WM_T_ICH8) {
   4480 				reg = CSR_READ(sc, WMREG_STATUS);
   4481 				reg &= ~__BIT(31);
   4482 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4483 
   4484 			}
   4485 
   4486 			/* IOSFPC */
   4487 			if (sc->sc_type == WM_T_PCH_SPT) {
   4488 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4489 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4490 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4491 			}
   4492 			/*
   4493 			 * Work-around descriptor data corruption issue during
   4494 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4495 			 * capability.
   4496 			 */
   4497 			reg = CSR_READ(sc, WMREG_RFCTL);
   4498 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4499 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4500 			break;
   4501 		default:
   4502 			break;
   4503 		}
   4504 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4505 
   4506 		switch (sc->sc_type) {
   4507 		/*
   4508 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4509 		 * Avoid RSS Hash Value bug.
   4510 		 */
   4511 		case WM_T_82571:
   4512 		case WM_T_82572:
   4513 		case WM_T_82573:
   4514 		case WM_T_80003:
   4515 		case WM_T_ICH8:
   4516 			reg = CSR_READ(sc, WMREG_RFCTL);
   4517 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4518 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4519 			break;
   4520 		case WM_T_82574:
4521 			/* Use extended Rx descriptors. */
   4522 			reg = CSR_READ(sc, WMREG_RFCTL);
   4523 			reg |= WMREG_RFCTL_EXSTEN;
   4524 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4525 			break;
   4526 		default:
   4527 			break;
   4528 		}
   4529 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4530 		/*
   4531 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4532 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4533 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4534 		 * Correctly by the Device"
   4535 		 *
   4536 		 * I354(C2000) Errata AVR53:
   4537 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4538 		 * Hang"
   4539 		 */
   4540 		reg = CSR_READ(sc, WMREG_RFCTL);
   4541 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4542 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4543 	}
   4544 }
   4545 
   4546 static uint32_t
   4547 wm_rxpbs_adjust_82580(uint32_t val)
   4548 {
   4549 	uint32_t rv = 0;
   4550 
   4551 	if (val < __arraycount(wm_82580_rxpbs_table))
   4552 		rv = wm_82580_rxpbs_table[val];
   4553 
   4554 	return rv;
   4555 }
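
/*
 * Illustrative note (not from the original source): values outside the
 * table range simply map to 0, so a bogus RXPBS register read in
 * wm_reset() cannot index past wm_82580_rxpbs_table[].
 */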
   4556 
   4557 /*
   4558  * wm_reset_phy:
   4559  *
4560  *	Generic PHY reset function.
4561  *	Same as e1000_phy_hw_reset_generic().
   4562  */
   4563 static int
   4564 wm_reset_phy(struct wm_softc *sc)
   4565 {
   4566 	uint32_t reg;
   4567 
   4568 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4569 		device_xname(sc->sc_dev), __func__));
   4570 	if (wm_phy_resetisblocked(sc))
   4571 		return -1;
   4572 
   4573 	sc->phy.acquire(sc);
   4574 
   4575 	reg = CSR_READ(sc, WMREG_CTRL);
   4576 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4577 	CSR_WRITE_FLUSH(sc);
   4578 
   4579 	delay(sc->phy.reset_delay_us);
   4580 
   4581 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4582 	CSR_WRITE_FLUSH(sc);
   4583 
   4584 	delay(150);
   4585 
   4586 	sc->phy.release(sc);
   4587 
   4588 	wm_get_cfg_done(sc);
   4589 	wm_phy_post_reset(sc);
   4590 
   4591 	return 0;
   4592 }
   4593 
   4594 /*
4595  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4596  * so it is enough to check sc->sc_queue[0] only.
   4597  */
   4598 static void
   4599 wm_flush_desc_rings(struct wm_softc *sc)
   4600 {
   4601 	pcireg_t preg;
   4602 	uint32_t reg;
   4603 	struct wm_txqueue *txq;
   4604 	wiseman_txdesc_t *txd;
   4605 	int nexttx;
   4606 	uint32_t rctl;
   4607 
   4608 	/* First, disable MULR fix in FEXTNVM11 */
   4609 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4610 	reg |= FEXTNVM11_DIS_MULRFIX;
   4611 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4612 
   4613 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4614 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4615 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4616 		return;
   4617 
   4618 	/* TX */
   4619 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4620 	    device_xname(sc->sc_dev), preg, reg);
   4621 	reg = CSR_READ(sc, WMREG_TCTL);
   4622 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4623 
   4624 	txq = &sc->sc_queue[0].wmq_txq;
   4625 	nexttx = txq->txq_next;
   4626 	txd = &txq->txq_descs[nexttx];
   4627 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4628 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4629 	txd->wtx_fields.wtxu_status = 0;
   4630 	txd->wtx_fields.wtxu_options = 0;
   4631 	txd->wtx_fields.wtxu_vlan = 0;
   4632 
   4633 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4634 	    BUS_SPACE_BARRIER_WRITE);
   4635 
   4636 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4637 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4638 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4639 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4640 	delay(250);
   4641 
   4642 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4643 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4644 		return;
   4645 
   4646 	/* RX */
   4647 	printf("%s: Need RX flush (reg = %08x)\n",
   4648 	    device_xname(sc->sc_dev), preg);
   4649 	rctl = CSR_READ(sc, WMREG_RCTL);
   4650 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4651 	CSR_WRITE_FLUSH(sc);
   4652 	delay(150);
   4653 
   4654 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4655 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4656 	reg &= 0xffffc000;
   4657 	/*
   4658 	 * Update thresholds: prefetch threshold to 31, host threshold
   4659 	 * to 1 and make sure the granularity is "descriptors" and not
   4660 	 * "cache lines"
   4661 	 */
   4662 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4663 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4664 
   4665 	/* Momentarily enable the RX ring for the changes to take effect */
   4666 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4667 	CSR_WRITE_FLUSH(sc);
   4668 	delay(150);
   4669 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4670 }
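
/*
 * Descriptive summary of the workaround above: a dummy 512-byte TX
 * descriptor is queued to drain the TX ring; if the flush request is
 * still pending afterwards, the RX ring is briefly re-enabled with
 * updated RXDCTL thresholds so that the hardware can complete the
 * flush.
 */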
   4671 
   4672 /*
   4673  * wm_reset:
   4674  *
   4675  *	Reset the i82542 chip.
   4676  */
   4677 static void
   4678 wm_reset(struct wm_softc *sc)
   4679 {
   4680 	int phy_reset = 0;
   4681 	int i, error = 0;
   4682 	uint32_t reg;
   4683 	uint16_t kmreg;
   4684 	int rv;
   4685 
   4686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4687 		device_xname(sc->sc_dev), __func__));
   4688 	KASSERT(sc->sc_type != 0);
   4689 
   4690 	/*
   4691 	 * Allocate on-chip memory according to the MTU size.
   4692 	 * The Packet Buffer Allocation register must be written
   4693 	 * before the chip is reset.
   4694 	 */
   4695 	switch (sc->sc_type) {
   4696 	case WM_T_82547:
   4697 	case WM_T_82547_2:
   4698 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4699 		    PBA_22K : PBA_30K;
   4700 		for (i = 0; i < sc->sc_nqueues; i++) {
   4701 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4702 			txq->txq_fifo_head = 0;
   4703 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4704 			txq->txq_fifo_size =
   4705 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4706 			txq->txq_fifo_stall = 0;
   4707 		}
   4708 		break;
   4709 	case WM_T_82571:
   4710 	case WM_T_82572:
4711 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4712 	case WM_T_80003:
   4713 		sc->sc_pba = PBA_32K;
   4714 		break;
   4715 	case WM_T_82573:
   4716 		sc->sc_pba = PBA_12K;
   4717 		break;
   4718 	case WM_T_82574:
   4719 	case WM_T_82583:
   4720 		sc->sc_pba = PBA_20K;
   4721 		break;
   4722 	case WM_T_82576:
   4723 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4724 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4725 		break;
   4726 	case WM_T_82580:
   4727 	case WM_T_I350:
   4728 	case WM_T_I354:
   4729 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4730 		break;
   4731 	case WM_T_I210:
   4732 	case WM_T_I211:
   4733 		sc->sc_pba = PBA_34K;
   4734 		break;
   4735 	case WM_T_ICH8:
   4736 		/* Workaround for a bit corruption issue in FIFO memory */
   4737 		sc->sc_pba = PBA_8K;
   4738 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4739 		break;
   4740 	case WM_T_ICH9:
   4741 	case WM_T_ICH10:
   4742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4743 		    PBA_14K : PBA_10K;
   4744 		break;
   4745 	case WM_T_PCH:
   4746 	case WM_T_PCH2:	/* XXX 14K? */
   4747 	case WM_T_PCH_LPT:
   4748 	case WM_T_PCH_SPT:
   4749 	case WM_T_PCH_CNP:
   4750 		sc->sc_pba = PBA_26K;
   4751 		break;
   4752 	default:
   4753 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4754 		    PBA_40K : PBA_48K;
   4755 		break;
   4756 	}
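	/*
	 * Example (illustrative): on an 82547 with a standard MTU, the RX
	 * packet buffer gets PBA_30K and the TX FIFO the remainder
	 * (PBA_40K - PBA_30K); with an MTU above 8192 the RX share shrinks
	 * to PBA_22K.
	 */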
   4757 	/*
4758 	 * Only old or non-multiqueue devices have the PBA register.
   4759 	 * XXX Need special handling for 82575.
   4760 	 */
   4761 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4762 	    || (sc->sc_type == WM_T_82575))
   4763 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4764 
   4765 	/* Prevent the PCI-E bus from sticking */
   4766 	if (sc->sc_flags & WM_F_PCIE) {
   4767 		int timeout = 800;
   4768 
   4769 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4771 
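		/*
		 * Poll for the GIO master enable status to clear; with
		 * timeout = 800 and delay(100), this waits up to 80 ms.
		 */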
   4772 		while (timeout--) {
   4773 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4774 			    == 0)
   4775 				break;
   4776 			delay(100);
   4777 		}
   4778 		if (timeout == 0)
   4779 			device_printf(sc->sc_dev,
   4780 			    "failed to disable busmastering\n");
   4781 	}
   4782 
   4783 	/* Set the completion timeout for interface */
   4784 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4785 	    || (sc->sc_type == WM_T_82580)
   4786 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4787 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4788 		wm_set_pcie_completion_timeout(sc);
   4789 
   4790 	/* Clear interrupt */
   4791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4792 	if (wm_is_using_msix(sc)) {
   4793 		if (sc->sc_type != WM_T_82574) {
   4794 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4795 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4796 		} else
   4797 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4798 	}
   4799 
   4800 	/* Stop the transmit and receive processes. */
   4801 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4802 	sc->sc_rctl &= ~RCTL_EN;
   4803 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4804 	CSR_WRITE_FLUSH(sc);
   4805 
   4806 	/* XXX set_tbi_sbp_82543() */
   4807 
   4808 	delay(10*1000);
   4809 
   4810 	/* Must acquire the MDIO ownership before MAC reset */
   4811 	switch (sc->sc_type) {
   4812 	case WM_T_82573:
   4813 	case WM_T_82574:
   4814 	case WM_T_82583:
   4815 		error = wm_get_hw_semaphore_82573(sc);
   4816 		break;
   4817 	default:
   4818 		break;
   4819 	}
   4820 
   4821 	/*
   4822 	 * 82541 Errata 29? & 82547 Errata 28?
   4823 	 * See also the description about PHY_RST bit in CTRL register
   4824 	 * in 8254x_GBe_SDM.pdf.
   4825 	 */
   4826 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4827 		CSR_WRITE(sc, WMREG_CTRL,
   4828 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4829 		CSR_WRITE_FLUSH(sc);
   4830 		delay(5000);
   4831 	}
   4832 
   4833 	switch (sc->sc_type) {
   4834 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4835 	case WM_T_82541:
   4836 	case WM_T_82541_2:
   4837 	case WM_T_82547:
   4838 	case WM_T_82547_2:
   4839 		/*
   4840 		 * On some chipsets, a reset through a memory-mapped write
   4841 		 * cycle can cause the chip to reset before completing the
4842 		 * write cycle. This causes a major headache that can be avoided
   4843 		 * by issuing the reset via indirect register writes through
   4844 		 * I/O space.
   4845 		 *
   4846 		 * So, if we successfully mapped the I/O BAR at attach time,
   4847 		 * use that. Otherwise, try our luck with a memory-mapped
   4848 		 * reset.
   4849 		 */
   4850 		if (sc->sc_flags & WM_F_IOH_VALID)
   4851 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4852 		else
   4853 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4854 		break;
   4855 	case WM_T_82545_3:
   4856 	case WM_T_82546_3:
   4857 		/* Use the shadow control register on these chips. */
   4858 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4859 		break;
   4860 	case WM_T_80003:
   4861 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4862 		sc->phy.acquire(sc);
   4863 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4864 		sc->phy.release(sc);
   4865 		break;
   4866 	case WM_T_ICH8:
   4867 	case WM_T_ICH9:
   4868 	case WM_T_ICH10:
   4869 	case WM_T_PCH:
   4870 	case WM_T_PCH2:
   4871 	case WM_T_PCH_LPT:
   4872 	case WM_T_PCH_SPT:
   4873 	case WM_T_PCH_CNP:
   4874 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4875 		if (wm_phy_resetisblocked(sc) == false) {
   4876 			/*
   4877 			 * Gate automatic PHY configuration by hardware on
   4878 			 * non-managed 82579
   4879 			 */
   4880 			if ((sc->sc_type == WM_T_PCH2)
   4881 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4882 				== 0))
   4883 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4884 
   4885 			reg |= CTRL_PHY_RESET;
   4886 			phy_reset = 1;
   4887 		} else
   4888 			printf("XXX reset is blocked!!!\n");
   4889 		sc->phy.acquire(sc);
   4890 		CSR_WRITE(sc, WMREG_CTRL, reg);
4891 		/* Don't insert a completion barrier during reset */
   4892 		delay(20*1000);
   4893 		mutex_exit(sc->sc_ich_phymtx);
   4894 		break;
   4895 	case WM_T_82580:
   4896 	case WM_T_I350:
   4897 	case WM_T_I354:
   4898 	case WM_T_I210:
   4899 	case WM_T_I211:
   4900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4901 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4902 			CSR_WRITE_FLUSH(sc);
   4903 		delay(5000);
   4904 		break;
   4905 	case WM_T_82542_2_0:
   4906 	case WM_T_82542_2_1:
   4907 	case WM_T_82543:
   4908 	case WM_T_82540:
   4909 	case WM_T_82545:
   4910 	case WM_T_82546:
   4911 	case WM_T_82571:
   4912 	case WM_T_82572:
   4913 	case WM_T_82573:
   4914 	case WM_T_82574:
   4915 	case WM_T_82575:
   4916 	case WM_T_82576:
   4917 	case WM_T_82583:
   4918 	default:
   4919 		/* Everything else can safely use the documented method. */
   4920 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4921 		break;
   4922 	}
   4923 
   4924 	/* Must release the MDIO ownership after MAC reset */
   4925 	switch (sc->sc_type) {
   4926 	case WM_T_82573:
   4927 	case WM_T_82574:
   4928 	case WM_T_82583:
   4929 		if (error == 0)
   4930 			wm_put_hw_semaphore_82573(sc);
   4931 		break;
   4932 	default:
   4933 		break;
   4934 	}
   4935 
   4936 	/* Set Phy Config Counter to 50msec */
   4937 	if (sc->sc_type == WM_T_PCH2) {
   4938 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4939 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4940 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4941 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4942 	}
   4943 
   4944 	if (phy_reset != 0)
   4945 		wm_get_cfg_done(sc);
   4946 
   4947 	/* Reload EEPROM */
   4948 	switch (sc->sc_type) {
   4949 	case WM_T_82542_2_0:
   4950 	case WM_T_82542_2_1:
   4951 	case WM_T_82543:
   4952 	case WM_T_82544:
   4953 		delay(10);
   4954 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4956 		CSR_WRITE_FLUSH(sc);
   4957 		delay(2000);
   4958 		break;
   4959 	case WM_T_82540:
   4960 	case WM_T_82545:
   4961 	case WM_T_82545_3:
   4962 	case WM_T_82546:
   4963 	case WM_T_82546_3:
   4964 		delay(5*1000);
   4965 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4966 		break;
   4967 	case WM_T_82541:
   4968 	case WM_T_82541_2:
   4969 	case WM_T_82547:
   4970 	case WM_T_82547_2:
   4971 		delay(20000);
   4972 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4973 		break;
   4974 	case WM_T_82571:
   4975 	case WM_T_82572:
   4976 	case WM_T_82573:
   4977 	case WM_T_82574:
   4978 	case WM_T_82583:
   4979 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4980 			delay(10);
   4981 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4982 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4983 			CSR_WRITE_FLUSH(sc);
   4984 		}
   4985 		/* check EECD_EE_AUTORD */
   4986 		wm_get_auto_rd_done(sc);
   4987 		/*
   4988 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4989 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
   4990 		 */
   4991 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4992 		    || (sc->sc_type == WM_T_82583))
   4993 			delay(25*1000);
   4994 		break;
   4995 	case WM_T_82575:
   4996 	case WM_T_82576:
   4997 	case WM_T_82580:
   4998 	case WM_T_I350:
   4999 	case WM_T_I354:
   5000 	case WM_T_I210:
   5001 	case WM_T_I211:
   5002 	case WM_T_80003:
   5003 		/* check EECD_EE_AUTORD */
   5004 		wm_get_auto_rd_done(sc);
   5005 		break;
   5006 	case WM_T_ICH8:
   5007 	case WM_T_ICH9:
   5008 	case WM_T_ICH10:
   5009 	case WM_T_PCH:
   5010 	case WM_T_PCH2:
   5011 	case WM_T_PCH_LPT:
   5012 	case WM_T_PCH_SPT:
   5013 	case WM_T_PCH_CNP:
   5014 		break;
   5015 	default:
   5016 		panic("%s: unknown type\n", __func__);
   5017 	}
   5018 
   5019 	/* Check whether EEPROM is present or not */
   5020 	switch (sc->sc_type) {
   5021 	case WM_T_82575:
   5022 	case WM_T_82576:
   5023 	case WM_T_82580:
   5024 	case WM_T_I350:
   5025 	case WM_T_I354:
   5026 	case WM_T_ICH8:
   5027 	case WM_T_ICH9:
   5028 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5029 			/* Not found */
   5030 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5031 			if (sc->sc_type == WM_T_82575)
   5032 				wm_reset_init_script_82575(sc);
   5033 		}
   5034 		break;
   5035 	default:
   5036 		break;
   5037 	}
   5038 
   5039 	if (phy_reset != 0)
   5040 		wm_phy_post_reset(sc);
   5041 
   5042 	if ((sc->sc_type == WM_T_82580)
   5043 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5044 		/* Clear global device reset status bit */
   5045 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5046 	}
   5047 
   5048 	/* Clear any pending interrupt events. */
   5049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5050 	reg = CSR_READ(sc, WMREG_ICR);
   5051 	if (wm_is_using_msix(sc)) {
   5052 		if (sc->sc_type != WM_T_82574) {
   5053 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5054 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5055 		} else
   5056 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5057 	}
   5058 
   5059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5062 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5063 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5064 		reg |= KABGTXD_BGSQLBIAS;
   5065 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5066 	}
   5067 
   5068 	/* Reload sc_ctrl */
   5069 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5070 
   5071 	wm_set_eee(sc);
   5072 
   5073 	/*
   5074 	 * For PCH, this write will make sure that any noise will be detected
   5075 	 * as a CRC error and be dropped rather than show up as a bad packet
   5076 	 * to the DMA engine
5077 	 * to the DMA engine.
   5078 	if (sc->sc_type == WM_T_PCH)
   5079 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5080 
   5081 	if (sc->sc_type >= WM_T_82544)
   5082 		CSR_WRITE(sc, WMREG_WUC, 0);
   5083 
   5084 	if (sc->sc_type < WM_T_82575)
   5085 		wm_disable_aspm(sc); /* Workaround for some chips */
   5086 
   5087 	wm_reset_mdicnfg_82580(sc);
   5088 
   5089 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5090 		wm_pll_workaround_i210(sc);
   5091 
   5092 	if (sc->sc_type == WM_T_80003) {
   5093 		/* Default to TRUE to enable the MDIC W/A */
   5094 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5095 
   5096 		rv = wm_kmrn_readreg(sc,
   5097 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5098 		if (rv == 0) {
   5099 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5100 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5101 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5102 			else
   5103 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5104 		}
   5105 	}
   5106 }
   5107 
   5108 /*
   5109  * wm_add_rxbuf:
   5110  *
5111  *	Add a receive buffer to the indicated descriptor.
   5112  */
   5113 static int
   5114 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5115 {
   5116 	struct wm_softc *sc = rxq->rxq_sc;
   5117 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5118 	struct mbuf *m;
   5119 	int error;
   5120 
   5121 	KASSERT(mutex_owned(rxq->rxq_lock));
   5122 
   5123 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5124 	if (m == NULL)
   5125 		return ENOBUFS;
   5126 
   5127 	MCLGET(m, M_DONTWAIT);
   5128 	if ((m->m_flags & M_EXT) == 0) {
   5129 		m_freem(m);
   5130 		return ENOBUFS;
   5131 	}
   5132 
   5133 	if (rxs->rxs_mbuf != NULL)
   5134 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5135 
   5136 	rxs->rxs_mbuf = m;
   5137 
   5138 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5139 	/*
   5140 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5141 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5142 	 */
   5143 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5144 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5145 	if (error) {
   5146 		/* XXX XXX XXX */
   5147 		aprint_error_dev(sc->sc_dev,
   5148 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5149 		panic("wm_add_rxbuf");
   5150 	}
   5151 
   5152 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5153 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5154 
   5155 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5156 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5157 			wm_init_rxdesc(rxq, idx);
   5158 	} else
   5159 		wm_init_rxdesc(rxq, idx);
   5160 
   5161 	return 0;
   5162 }
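
/*
 * A minimal usage sketch (illustrative, not the driver's actual init
 * path): each RX descriptor slot is given a buffer in turn, with
 * rxq->rxq_lock held as the KASSERT above requires:
 *
 *	for (i = 0; i < WM_NRXDESC; i++)
 *		if ((error = wm_add_rxbuf(rxq, i)) != 0)
 *			break;
 */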
   5163 
   5164 /*
   5165  * wm_rxdrain:
   5166  *
   5167  *	Drain the receive queue.
   5168  */
   5169 static void
   5170 wm_rxdrain(struct wm_rxqueue *rxq)
   5171 {
   5172 	struct wm_softc *sc = rxq->rxq_sc;
   5173 	struct wm_rxsoft *rxs;
   5174 	int i;
   5175 
   5176 	KASSERT(mutex_owned(rxq->rxq_lock));
   5177 
   5178 	for (i = 0; i < WM_NRXDESC; i++) {
   5179 		rxs = &rxq->rxq_soft[i];
   5180 		if (rxs->rxs_mbuf != NULL) {
   5181 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5182 			m_freem(rxs->rxs_mbuf);
   5183 			rxs->rxs_mbuf = NULL;
   5184 		}
   5185 	}
   5186 }
   5187 
   5188 /*
5189  * Set up registers for RSS.
5190  *
5191  * XXX VMDq is not supported yet.
   5192  */
   5193 static void
   5194 wm_init_rss(struct wm_softc *sc)
   5195 {
   5196 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5197 	int i;
   5198 
   5199 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5200 
   5201 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5202 		unsigned int qid, reta_ent;
   5203 
   5204 		qid  = i % sc->sc_nqueues;
   5205 		switch (sc->sc_type) {
   5206 		case WM_T_82574:
   5207 			reta_ent = __SHIFTIN(qid,
   5208 			    RETA_ENT_QINDEX_MASK_82574);
   5209 			break;
   5210 		case WM_T_82575:
   5211 			reta_ent = __SHIFTIN(qid,
   5212 			    RETA_ENT_QINDEX1_MASK_82575);
   5213 			break;
   5214 		default:
   5215 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5216 			break;
   5217 		}
   5218 
   5219 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5220 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5221 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5222 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5223 	}
   5224 
   5225 	rss_getkey((uint8_t *)rss_key);
   5226 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5227 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5228 
   5229 	if (sc->sc_type == WM_T_82574)
   5230 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5231 	else
   5232 		mrqc = MRQC_ENABLE_RSS_MQ;
   5233 
   5234 	/*
   5235 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5236 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5237 	 */
   5238 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5241 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5242 
   5243 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5244 }
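
/*
 * Example (illustrative): with sc_nqueues = 4, qid cycles 0, 1, 2, 3,
 * 0, 1, ... across the RETA_NUM_ENTRIES indirection entries, so the
 * RSS hash spreads flows evenly over the four RX queues.
 */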
   5245 
   5246 /*
5247  * Adjust the TX and RX queue numbers which the system actually uses.
5248  *
5249  * The numbers are affected by the following parameters:
5250  *     - The number of hardware queues
   5251  *     - The number of MSI-X vectors (= "nvectors" argument)
   5252  *     - ncpu
   5253  */
   5254 static void
   5255 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5256 {
   5257 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5258 
   5259 	if (nvectors < 2) {
   5260 		sc->sc_nqueues = 1;
   5261 		return;
   5262 	}
   5263 
   5264 	switch (sc->sc_type) {
   5265 	case WM_T_82572:
   5266 		hw_ntxqueues = 2;
   5267 		hw_nrxqueues = 2;
   5268 		break;
   5269 	case WM_T_82574:
   5270 		hw_ntxqueues = 2;
   5271 		hw_nrxqueues = 2;
   5272 		break;
   5273 	case WM_T_82575:
   5274 		hw_ntxqueues = 4;
   5275 		hw_nrxqueues = 4;
   5276 		break;
   5277 	case WM_T_82576:
   5278 		hw_ntxqueues = 16;
   5279 		hw_nrxqueues = 16;
   5280 		break;
   5281 	case WM_T_82580:
   5282 	case WM_T_I350:
   5283 	case WM_T_I354:
   5284 		hw_ntxqueues = 8;
   5285 		hw_nrxqueues = 8;
   5286 		break;
   5287 	case WM_T_I210:
   5288 		hw_ntxqueues = 4;
   5289 		hw_nrxqueues = 4;
   5290 		break;
   5291 	case WM_T_I211:
   5292 		hw_ntxqueues = 2;
   5293 		hw_nrxqueues = 2;
   5294 		break;
   5295 		/*
5296 		 * As the Ethernet controllers below do not support MSI-X,
5297 		 * this driver does not use multiqueue on them.
   5298 		 *     - WM_T_80003
   5299 		 *     - WM_T_ICH8
   5300 		 *     - WM_T_ICH9
   5301 		 *     - WM_T_ICH10
   5302 		 *     - WM_T_PCH
   5303 		 *     - WM_T_PCH2
   5304 		 *     - WM_T_PCH_LPT
   5305 		 */
   5306 	default:
   5307 		hw_ntxqueues = 1;
   5308 		hw_nrxqueues = 1;
   5309 		break;
   5310 	}
   5311 
   5312 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5313 
   5314 	/*
5315 	 * As more queues than MSI-X vectors cannot improve scaling, we
5316 	 * limit the number of queues actually used.
   5317 	 */
   5318 	if (nvectors < hw_nqueues + 1)
   5319 		sc->sc_nqueues = nvectors - 1;
   5320 	else
   5321 		sc->sc_nqueues = hw_nqueues;
   5322 
   5323 	/*
5324 	 * As more queues than CPUs cannot improve scaling, we limit
5325 	 * the number of queues actually used.
   5326 	 */
   5327 	if (ncpu < sc->sc_nqueues)
   5328 		sc->sc_nqueues = ncpu;
   5329 }
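
/*
 * Worked example (illustrative): an 82576 has 16 TX and 16 RX hardware
 * queues, so hw_nqueues = 16. With nvectors = 5 MSI-X vectors (one of
 * them reserved for the link interrupt), sc_nqueues = 5 - 1 = 4; if
 * ncpu < 4 it is clamped further to ncpu.
 */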
   5330 
   5331 static inline bool
   5332 wm_is_using_msix(struct wm_softc *sc)
   5333 {
   5334 
   5335 	return (sc->sc_nintrs > 1);
   5336 }
   5337 
   5338 static inline bool
   5339 wm_is_using_multiqueue(struct wm_softc *sc)
   5340 {
   5341 
   5342 	return (sc->sc_nqueues > 1);
   5343 }
   5344 
   5345 static int
   5346 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5347 {
   5348 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5349 	wmq->wmq_id = qidx;
   5350 	wmq->wmq_intr_idx = intr_idx;
   5351 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5352 #ifdef WM_MPSAFE
   5353 	    | SOFTINT_MPSAFE
   5354 #endif
   5355 	    , wm_handle_queue, wmq);
   5356 	if (wmq->wmq_si != NULL)
   5357 		return 0;
   5358 
   5359 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5360 	    wmq->wmq_id);
   5361 
   5362 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5363 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5364 	return ENOMEM;
   5365 }
   5366 
   5367 /*
   5368  * Both single interrupt MSI and INTx can use this function.
   5369  */
   5370 static int
   5371 wm_setup_legacy(struct wm_softc *sc)
   5372 {
   5373 	pci_chipset_tag_t pc = sc->sc_pc;
   5374 	const char *intrstr = NULL;
   5375 	char intrbuf[PCI_INTRSTR_LEN];
   5376 	int error;
   5377 
   5378 	error = wm_alloc_txrx_queues(sc);
   5379 	if (error) {
   5380 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5381 		    error);
   5382 		return ENOMEM;
   5383 	}
   5384 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5385 	    sizeof(intrbuf));
   5386 #ifdef WM_MPSAFE
   5387 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5388 #endif
   5389 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5390 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5391 	if (sc->sc_ihs[0] == NULL) {
5392 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5393 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5394 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5395 		return ENOMEM;
   5396 	}
   5397 
   5398 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5399 	sc->sc_nintrs = 1;
   5400 
   5401 	return wm_softint_establish(sc, 0, 0);
   5402 }
   5403 
   5404 static int
   5405 wm_setup_msix(struct wm_softc *sc)
   5406 {
   5407 	void *vih;
   5408 	kcpuset_t *affinity;
   5409 	int qidx, error, intr_idx, txrx_established;
   5410 	pci_chipset_tag_t pc = sc->sc_pc;
   5411 	const char *intrstr = NULL;
   5412 	char intrbuf[PCI_INTRSTR_LEN];
   5413 	char intr_xname[INTRDEVNAMEBUF];
   5414 
   5415 	if (sc->sc_nqueues < ncpu) {
   5416 		/*
5417 		 * To avoid other devices' interrupts, the affinity of the
5418 		 * Tx/Rx interrupts starts from CPU#1.
   5419 		 */
   5420 		sc->sc_affinity_offset = 1;
   5421 	} else {
   5422 		/*
5423 		 * In this case, this device uses all CPUs, so for readability
5424 		 * we match the affinitized cpu_index to the MSI-X vector number.
   5425 		 */
   5426 		sc->sc_affinity_offset = 0;
   5427 	}
   5428 
   5429 	error = wm_alloc_txrx_queues(sc);
   5430 	if (error) {
   5431 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5432 		    error);
   5433 		return ENOMEM;
   5434 	}
   5435 
   5436 	kcpuset_create(&affinity, false);
   5437 	intr_idx = 0;
   5438 
   5439 	/*
   5440 	 * TX and RX
   5441 	 */
   5442 	txrx_established = 0;
   5443 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5444 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5445 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5446 
   5447 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5448 		    sizeof(intrbuf));
   5449 #ifdef WM_MPSAFE
   5450 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5451 		    PCI_INTR_MPSAFE, true);
   5452 #endif
   5453 		memset(intr_xname, 0, sizeof(intr_xname));
   5454 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5455 		    device_xname(sc->sc_dev), qidx);
   5456 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5457 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5458 		if (vih == NULL) {
   5459 			aprint_error_dev(sc->sc_dev,
   5460 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5461 			    intrstr ? " at " : "",
   5462 			    intrstr ? intrstr : "");
   5463 
   5464 			goto fail;
   5465 		}
   5466 		kcpuset_zero(affinity);
   5467 		/* Round-robin affinity */
   5468 		kcpuset_set(affinity, affinity_to);
   5469 		error = interrupt_distribute(vih, affinity, NULL);
   5470 		if (error == 0) {
   5471 			aprint_normal_dev(sc->sc_dev,
   5472 			    "for TX and RX interrupting at %s affinity to %u\n",
   5473 			    intrstr, affinity_to);
   5474 		} else {
   5475 			aprint_normal_dev(sc->sc_dev,
   5476 			    "for TX and RX interrupting at %s\n", intrstr);
   5477 		}
   5478 		sc->sc_ihs[intr_idx] = vih;
   5479 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5480 			goto fail;
   5481 		txrx_established++;
   5482 		intr_idx++;
   5483 	}
   5484 
   5485 	/* LINK */
   5486 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5487 	    sizeof(intrbuf));
   5488 #ifdef WM_MPSAFE
   5489 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5490 #endif
   5491 	memset(intr_xname, 0, sizeof(intr_xname));
   5492 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5493 	    device_xname(sc->sc_dev));
   5494 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5495 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5496 	if (vih == NULL) {
   5497 		aprint_error_dev(sc->sc_dev,
   5498 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5499 		    intrstr ? " at " : "",
   5500 		    intrstr ? intrstr : "");
   5501 
   5502 		goto fail;
   5503 	}
   5504 	/* Keep default affinity to LINK interrupt */
   5505 	aprint_normal_dev(sc->sc_dev,
   5506 	    "for LINK interrupting at %s\n", intrstr);
   5507 	sc->sc_ihs[intr_idx] = vih;
   5508 	sc->sc_link_intr_idx = intr_idx;
   5509 
   5510 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5511 	kcpuset_destroy(affinity);
   5512 	return 0;
   5513 
   5514  fail:
   5515 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5516 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5517 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5518 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5519 	}
   5520 
   5521 	kcpuset_destroy(affinity);
   5522 	return ENOMEM;
   5523 }
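
/*
 * Example vector layout (illustrative): with sc_nqueues = 4 the device
 * uses five MSI-X vectors, TXRX0..TXRX3 round-robined across CPUs
 * starting at sc_affinity_offset, plus one LINK vector that keeps the
 * default affinity, giving sc_nintrs = sc_nqueues + 1 = 5.
 */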
   5524 
   5525 static void
   5526 wm_unset_stopping_flags(struct wm_softc *sc)
   5527 {
   5528 	int i;
   5529 
   5530 	KASSERT(WM_CORE_LOCKED(sc));
   5531 
   5532 	/* Must unset stopping flags in ascending order. */
   5533 	for (i = 0; i < sc->sc_nqueues; i++) {
   5534 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5535 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5536 
   5537 		mutex_enter(txq->txq_lock);
   5538 		txq->txq_stopping = false;
   5539 		mutex_exit(txq->txq_lock);
   5540 
   5541 		mutex_enter(rxq->rxq_lock);
   5542 		rxq->rxq_stopping = false;
   5543 		mutex_exit(rxq->rxq_lock);
   5544 	}
   5545 
   5546 	sc->sc_core_stopping = false;
   5547 }
   5548 
   5549 static void
   5550 wm_set_stopping_flags(struct wm_softc *sc)
   5551 {
   5552 	int i;
   5553 
   5554 	KASSERT(WM_CORE_LOCKED(sc));
   5555 
   5556 	sc->sc_core_stopping = true;
   5557 
   5558 	/* Must set stopping flags in ascending order. */
   5559 	for (i = 0; i < sc->sc_nqueues; i++) {
   5560 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5561 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5562 
   5563 		mutex_enter(rxq->rxq_lock);
   5564 		rxq->rxq_stopping = true;
   5565 		mutex_exit(rxq->rxq_lock);
   5566 
   5567 		mutex_enter(txq->txq_lock);
   5568 		txq->txq_stopping = true;
   5569 		mutex_exit(txq->txq_lock);
   5570 	}
   5571 }
   5572 
   5573 /*
   5574  * Write interrupt interval value to ITR or EITR
   5575  */
   5576 static void
   5577 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5578 {
   5579 
   5580 	if (!wmq->wmq_set_itr)
   5581 		return;
   5582 
   5583 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5584 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5585 
   5586 		/*
5587 		 * The 82575 doesn't have the CNT_INGR field.
5588 		 * So, overwrite the counter field in software.
   5589 		 */
   5590 		if (sc->sc_type == WM_T_82575)
   5591 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5592 		else
   5593 			eitr |= EITR_CNT_INGR;
   5594 
   5595 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5596 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5597 		/*
5598 		 * The 82574 has both ITR and EITR. Set EITR when we use
5599 		 * the multiqueue function with MSI-X.
   5600 		 */
   5601 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5602 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5603 	} else {
   5604 		KASSERT(wmq->wmq_id == 0);
   5605 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5606 	}
   5607 
   5608 	wmq->wmq_set_itr = false;
   5609 }
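
/*
 * In short: NEWQUEUE chips get EITR (with the 82575's counter field
 * overwritten in software), an 82574 in MSI-X mode gets its own EITR
 * variant, and everything else falls back to the single ITR register
 * (queue 0 only).
 */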
   5610 
   5611 /*
   5612  * TODO
5613  * The dynamic ITR calculation below is almost the same as in Linux igb,
5614  * but it does not fit wm(4). So, AIM is disabled until we find an
5615  * appropriate ITR calculation for wm(4).
   5616  */
   5617 /*
5618  * Calculate the interrupt interval value that wm_itrs_writereg() will
5619  * write to the register. This function does not write the ITR/EITR register.
   5620  */
   5621 static void
   5622 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5623 {
   5624 #ifdef NOTYET
   5625 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5626 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5627 	uint32_t avg_size = 0;
   5628 	uint32_t new_itr;
   5629 
   5630 	if (rxq->rxq_packets)
   5631 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5632 	if (txq->txq_packets)
   5633 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5634 
   5635 	if (avg_size == 0) {
   5636 		new_itr = 450; /* restore default value */
   5637 		goto out;
   5638 	}
   5639 
   5640 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5641 	avg_size += 24;
   5642 
   5643 	/* Don't starve jumbo frames */
   5644 	avg_size = uimin(avg_size, 3000);
   5645 
   5646 	/* Give a little boost to mid-size frames */
   5647 	if ((avg_size > 300) && (avg_size < 1200))
   5648 		new_itr = avg_size / 3;
   5649 	else
   5650 		new_itr = avg_size / 2;
   5651 
   5652 out:
   5653 	/*
5654 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
5655 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
   5656 	 */
   5657 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5658 		new_itr *= 4;
   5659 
   5660 	if (new_itr != wmq->wmq_itr) {
   5661 		wmq->wmq_itr = new_itr;
   5662 		wmq->wmq_set_itr = true;
   5663 	} else
   5664 		wmq->wmq_set_itr = false;
   5665 
   5666 	rxq->rxq_packets = 0;
   5667 	rxq->rxq_bytes = 0;
   5668 	txq->txq_packets = 0;
   5669 	txq->txq_bytes = 0;
   5670 #endif
   5671 }
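
/*
 * Worked example for the disabled code above (illustrative): a stream
 * of 1500-byte frames gives avg_size = 1524 after the CRC/preamble/gap
 * adjustment, which is outside the (300, 1200) mid-size boost range,
 * so new_itr = 1524 / 2 = 762, then scaled by 4 to 3048 (the 82575 is
 * the only controller exempt from the scaling).
 */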
   5672 
   5673 /*
   5674  * wm_init:		[ifnet interface function]
   5675  *
   5676  *	Initialize the interface.
   5677  */
   5678 static int
   5679 wm_init(struct ifnet *ifp)
   5680 {
   5681 	struct wm_softc *sc = ifp->if_softc;
   5682 	int ret;
   5683 
   5684 	WM_CORE_LOCK(sc);
   5685 	ret = wm_init_locked(ifp);
   5686 	WM_CORE_UNLOCK(sc);
   5687 
   5688 	return ret;
   5689 }
   5690 
   5691 static int
   5692 wm_init_locked(struct ifnet *ifp)
   5693 {
   5694 	struct wm_softc *sc = ifp->if_softc;
   5695 	struct ethercom *ec = &sc->sc_ethercom;
   5696 	int i, j, trynum, error = 0;
   5697 	uint32_t reg;
   5698 
   5699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5700 		device_xname(sc->sc_dev), __func__));
   5701 	KASSERT(WM_CORE_LOCKED(sc));
   5702 
   5703 	/*
5704 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
5705 	 * There is a small but measurable benefit to avoiding the adjustment
   5706 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5707 	 * on such platforms.  One possibility is that the DMA itself is
   5708 	 * slightly more efficient if the front of the entire packet (instead
   5709 	 * of the front of the headers) is aligned.
   5710 	 *
   5711 	 * Note we must always set align_tweak to 0 if we are using
   5712 	 * jumbo frames.
   5713 	 */
   5714 #ifdef __NO_STRICT_ALIGNMENT
   5715 	sc->sc_align_tweak = 0;
   5716 #else
   5717 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5718 		sc->sc_align_tweak = 0;
   5719 	else
   5720 		sc->sc_align_tweak = 2;
   5721 #endif /* __NO_STRICT_ALIGNMENT */
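
	/*
	 * Example (illustrative): the 14-byte Ethernet header leaves the
	 * IP header 2 bytes short of a 4-byte boundary, so offsetting the
	 * receive buffer by align_tweak = 2 realigns the payload headers
	 * on strict-alignment platforms.
	 */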
   5722 
   5723 	/* Cancel any pending I/O. */
   5724 	wm_stop_locked(ifp, 0);
   5725 
   5726 	/* Update statistics before reset */
   5727 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5728 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5729 
   5730 	/* PCH_SPT hardware workaround */
   5731 	if (sc->sc_type == WM_T_PCH_SPT)
   5732 		wm_flush_desc_rings(sc);
   5733 
   5734 	/* Reset the chip to a known state. */
   5735 	wm_reset(sc);
   5736 
   5737 	/*
5738 	 * AMT-based hardware can now take control from firmware.
5739 	 * Do this after reset.
   5740 	 */
   5741 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5742 		wm_get_hw_control(sc);
   5743 
   5744 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5745 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5746 		wm_legacy_irq_quirk_spt(sc);
   5747 
   5748 	/* Init hardware bits */
   5749 	wm_initialize_hardware_bits(sc);
   5750 
   5751 	/* Reset the PHY. */
   5752 	if (sc->sc_flags & WM_F_HAS_MII)
   5753 		wm_gmii_reset(sc);
   5754 
   5755 	if (sc->sc_type >= WM_T_ICH8) {
   5756 		reg = CSR_READ(sc, WMREG_GCR);
   5757 		/*
   5758 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5759 		 * default after reset.
   5760 		 */
   5761 		if (sc->sc_type == WM_T_ICH8)
   5762 			reg |= GCR_NO_SNOOP_ALL;
   5763 		else
   5764 			reg &= ~GCR_NO_SNOOP_ALL;
   5765 		CSR_WRITE(sc, WMREG_GCR, reg);
   5766 	}
   5767 	if ((sc->sc_type >= WM_T_ICH8)
   5768 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5769 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5770 
   5771 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5772 		reg |= CTRL_EXT_RO_DIS;
   5773 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5774 	}
   5775 
   5776 	/* Calculate (E)ITR value */
   5777 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5778 		/*
   5779 		 * For NEWQUEUE's EITR (except for 82575).
5780 		 * The 82575's EITR should be set to the same throttling value
5781 		 * as other old controllers' ITR because the interrupt/sec
5782 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
5783 		 *
5784 		 * The 82574's EITR should be set to the same throttling value
5785 		 * as the ITR.
5786 		 *
5787 		 * For N interrupts/sec, set this value to 1,000,000 / N, in contrast to the ITR throttling value.
   5788 		 */
   5789 		sc->sc_itr_init = 450;
   5790 	} else if (sc->sc_type >= WM_T_82543) {
   5791 		/*
   5792 		 * Set up the interrupt throttling register (units of 256ns)
   5793 		 * Note that a footnote in Intel's documentation says this
   5794 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5795 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5796 		 * that that is also true for the 1024ns units of the other
   5797 		 * interrupt-related timer registers -- so, really, we ought
   5798 		 * to divide this value by 4 when the link speed is low.
   5799 		 *
   5800 		 * XXX implement this division at link speed change!
   5801 		 */
   5802 
   5803 		/*
   5804 		 * For N interrupts/sec, set this value to:
   5805 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5806 		 * absolute and packet timer values to this value
   5807 		 * divided by 4 to get "simple timer" behavior.
   5808 		 */
   5809 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5810 	}
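	/*
	 * Example (illustrative): sc_itr_init = 1500 in 256 ns units gives
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, the figure
	 * noted above.
	 */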
   5811 
   5812 	error = wm_init_txrx_queues(sc);
   5813 	if (error)
   5814 		goto out;
   5815 
   5816 	/* Clear out the VLAN table -- we don't use it (yet). */
   5817 	CSR_WRITE(sc, WMREG_VET, 0);
   5818 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5819 		trynum = 10; /* Due to hw errata */
   5820 	else
   5821 		trynum = 1;
   5822 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5823 		for (j = 0; j < trynum; j++)
   5824 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5825 
   5826 	/*
   5827 	 * Set up flow-control parameters.
   5828 	 *
   5829 	 * XXX Values could probably stand some tuning.
   5830 	 */
   5831 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5832 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5833 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5834 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5835 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5836 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5837 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5838 	}
   5839 
   5840 	sc->sc_fcrtl = FCRTL_DFLT;
   5841 	if (sc->sc_type < WM_T_82543) {
   5842 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5843 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5844 	} else {
   5845 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5846 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5847 	}
   5848 
   5849 	if (sc->sc_type == WM_T_80003)
   5850 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5851 	else
   5852 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5853 
   5854 	/* Writes the control register. */
   5855 	wm_set_vlan(sc);
   5856 
   5857 	if (sc->sc_flags & WM_F_HAS_MII) {
   5858 		uint16_t kmreg;
   5859 
   5860 		switch (sc->sc_type) {
   5861 		case WM_T_80003:
   5862 		case WM_T_ICH8:
   5863 		case WM_T_ICH9:
   5864 		case WM_T_ICH10:
   5865 		case WM_T_PCH:
   5866 		case WM_T_PCH2:
   5867 		case WM_T_PCH_LPT:
   5868 		case WM_T_PCH_SPT:
   5869 		case WM_T_PCH_CNP:
   5870 			/*
   5871 			 * Set the mac to wait the maximum time between each
   5872 			 * iteration and increase the max iterations when
   5873 			 * polling the phy; this fixes erroneous timeouts at
   5874 			 * 10Mbps.
   5875 			 */
   5876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5877 			    0xFFFF);
   5878 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5879 			    &kmreg);
   5880 			kmreg |= 0x3F;
   5881 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5882 			    kmreg);
   5883 			break;
   5884 		default:
   5885 			break;
   5886 		}
   5887 
   5888 		if (sc->sc_type == WM_T_80003) {
   5889 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5890 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5892 
5893 			/* Bypass RX and TX FIFOs */
   5894 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5895 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5896 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5897 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5898 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5899 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5900 		}
   5901 	}
   5902 #if 0
   5903 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5904 #endif
   5905 
   5906 	/* Set up checksum offload parameters. */
   5907 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5908 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5909 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5910 		reg |= RXCSUM_IPOFL;
   5911 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5912 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5913 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5914 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5915 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5916 
   5917 	/* Set registers about MSI-X */
   5918 	if (wm_is_using_msix(sc)) {
   5919 		uint32_t ivar, qintr_idx;
   5920 		struct wm_queue *wmq;
   5921 		unsigned int qid;
   5922 
   5923 		if (sc->sc_type == WM_T_82575) {
   5924 			/* Interrupt control */
   5925 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5926 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5928 
   5929 			/* TX and RX */
   5930 			for (i = 0; i < sc->sc_nqueues; i++) {
   5931 				wmq = &sc->sc_queue[i];
   5932 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5933 				    EITR_TX_QUEUE(wmq->wmq_id)
   5934 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5935 			}
   5936 			/* Link status */
   5937 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5938 			    EITR_OTHER);
   5939 		} else if (sc->sc_type == WM_T_82574) {
   5940 			/* Interrupt control */
   5941 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5942 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5943 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5944 
   5945 			/*
5946 			 * Work around an issue with spurious interrupts
5947 			 * in MSI-X mode.
5948 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
5949 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5950 			 */
   5951 			reg = CSR_READ(sc, WMREG_RFCTL);
   5952 			reg |= WMREG_RFCTL_ACKDIS;
   5953 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5954 
   5955 			ivar = 0;
   5956 			/* TX and RX */
   5957 			for (i = 0; i < sc->sc_nqueues; i++) {
   5958 				wmq = &sc->sc_queue[i];
   5959 				qid = wmq->wmq_id;
   5960 				qintr_idx = wmq->wmq_intr_idx;
   5961 
   5962 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5963 				    IVAR_TX_MASK_Q_82574(qid));
   5964 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5965 				    IVAR_RX_MASK_Q_82574(qid));
   5966 			}
   5967 			/* Link status */
   5968 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5969 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5970 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5971 		} else {
   5972 			/* Interrupt control */
   5973 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5974 			    | GPIE_EIAME | GPIE_PBA);
   5975 
   5976 			switch (sc->sc_type) {
   5977 			case WM_T_82580:
   5978 			case WM_T_I350:
   5979 			case WM_T_I354:
   5980 			case WM_T_I210:
   5981 			case WM_T_I211:
   5982 				/* TX and RX */
   5983 				for (i = 0; i < sc->sc_nqueues; i++) {
   5984 					wmq = &sc->sc_queue[i];
   5985 					qid = wmq->wmq_id;
   5986 					qintr_idx = wmq->wmq_intr_idx;
   5987 
   5988 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5989 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5990 					ivar |= __SHIFTIN((qintr_idx
   5991 						| IVAR_VALID),
   5992 					    IVAR_TX_MASK_Q(qid));
   5993 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5994 					ivar |= __SHIFTIN((qintr_idx
   5995 						| IVAR_VALID),
   5996 					    IVAR_RX_MASK_Q(qid));
   5997 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5998 				}
   5999 				break;
   6000 			case WM_T_82576:
   6001 				/* TX and RX */
   6002 				for (i = 0; i < sc->sc_nqueues; i++) {
   6003 					wmq = &sc->sc_queue[i];
   6004 					qid = wmq->wmq_id;
   6005 					qintr_idx = wmq->wmq_intr_idx;
   6006 
   6007 					ivar = CSR_READ(sc,
   6008 					    WMREG_IVAR_Q_82576(qid));
   6009 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6010 					ivar |= __SHIFTIN((qintr_idx
   6011 						| IVAR_VALID),
   6012 					    IVAR_TX_MASK_Q_82576(qid));
   6013 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6014 					ivar |= __SHIFTIN((qintr_idx
   6015 						| IVAR_VALID),
   6016 					    IVAR_RX_MASK_Q_82576(qid));
   6017 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6018 					    ivar);
   6019 				}
   6020 				break;
   6021 			default:
   6022 				break;
   6023 			}
   6024 
   6025 			/* Link status */
   6026 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6027 			    IVAR_MISC_OTHER);
   6028 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6029 		}
   6030 
   6031 		if (wm_is_using_multiqueue(sc)) {
   6032 			wm_init_rss(sc);
   6033 
6034 			/*
6035 			 * NOTE: Receive Full-Packet Checksum Offload is
6036 			 * mutually exclusive with Multiqueue. However, this
6037 			 * is not the same as TCP/IP checksums, which still
6038 			 * work.
6039 			 */
   6040 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6041 			reg |= RXCSUM_PCSD;
   6042 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6043 		}
   6044 	}
   6045 
   6046 	/* Set up the interrupt registers. */
   6047 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6048 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6049 	    ICR_RXO | ICR_RXT0;
   6050 	if (wm_is_using_msix(sc)) {
   6051 		uint32_t mask;
   6052 		struct wm_queue *wmq;
   6053 
   6054 		switch (sc->sc_type) {
   6055 		case WM_T_82574:
   6056 			mask = 0;
   6057 			for (i = 0; i < sc->sc_nqueues; i++) {
   6058 				wmq = &sc->sc_queue[i];
   6059 				mask |= ICR_TXQ(wmq->wmq_id);
   6060 				mask |= ICR_RXQ(wmq->wmq_id);
   6061 			}
   6062 			mask |= ICR_OTHER;
   6063 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6064 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6065 			break;
   6066 		default:
   6067 			if (sc->sc_type == WM_T_82575) {
   6068 				mask = 0;
   6069 				for (i = 0; i < sc->sc_nqueues; i++) {
   6070 					wmq = &sc->sc_queue[i];
   6071 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6072 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6073 				}
   6074 				mask |= EITR_OTHER;
   6075 			} else {
   6076 				mask = 0;
   6077 				for (i = 0; i < sc->sc_nqueues; i++) {
   6078 					wmq = &sc->sc_queue[i];
   6079 					mask |= 1 << wmq->wmq_intr_idx;
   6080 				}
   6081 				mask |= 1 << sc->sc_link_intr_idx;
   6082 			}
   6083 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6084 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6085 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6086 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6087 			break;
   6088 		}
   6089 	} else
   6090 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6091 
   6092 	/* Set up the inter-packet gap. */
   6093 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6094 
   6095 	if (sc->sc_type >= WM_T_82543) {
   6096 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6097 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6098 			wm_itrs_writereg(sc, wmq);
   6099 		}
   6100 		/*
6101 		 * Link interrupts occur much less often than TX
6102 		 * and RX interrupts, so we don't tune the
6103 		 * EINTR(WM_MSIX_LINKINTR_IDX) value the way
6104 		 * FreeBSD's if_igb does.
   6105 		 */
   6106 	}
   6107 
   6108 	/* Set the VLAN ethernetype. */
   6109 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6110 
   6111 	/*
   6112 	 * Set up the transmit control register; we start out with
6113 	 * a collision distance suitable for FDX, but update it when
   6114 	 * we resolve the media type.
   6115 	 */
   6116 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6117 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6118 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6119 	if (sc->sc_type >= WM_T_82571)
   6120 		sc->sc_tctl |= TCTL_MULR;
   6121 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6122 
   6123 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6124 		/* Write TDT after TCTL.EN is set. See the document. */
   6125 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6126 	}
   6127 
   6128 	if (sc->sc_type == WM_T_80003) {
   6129 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6130 		reg &= ~TCTL_EXT_GCEX_MASK;
   6131 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6132 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6133 	}
   6134 
   6135 	/* Set the media. */
   6136 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6137 		goto out;
   6138 
   6139 	/* Configure for OS presence */
   6140 	wm_init_manageability(sc);
   6141 
   6142 	/*
   6143 	 * Set up the receive control register; we actually program the
   6144 	 * register when we set the receive filter. Use multicast address
   6145 	 * offset type 0.
   6146 	 *
   6147 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6148 	 * don't enable that feature.
   6149 	 */
   6150 	sc->sc_mchash_type = 0;
   6151 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6152 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6153 
6154 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6155 	if (sc->sc_type == WM_T_82574)
   6156 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6157 
   6158 	/*
   6159 	 * The I350 has a bug where it always strips the CRC whether
6160 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6161 	 */
   6162 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6163 	    || (sc->sc_type == WM_T_I210))
   6164 		sc->sc_rctl |= RCTL_SECRC;
   6165 
   6166 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6167 	    && (ifp->if_mtu > ETHERMTU)) {
   6168 		sc->sc_rctl |= RCTL_LPE;
   6169 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6170 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6171 	}
   6172 
   6173 	if (MCLBYTES == 2048)
   6174 		sc->sc_rctl |= RCTL_2k;
   6175 	else {
   6176 		if (sc->sc_type >= WM_T_82543) {
   6177 			switch (MCLBYTES) {
   6178 			case 4096:
   6179 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6180 				break;
   6181 			case 8192:
   6182 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6183 				break;
   6184 			case 16384:
   6185 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6186 				break;
   6187 			default:
   6188 				panic("wm_init: MCLBYTES %d unsupported",
   6189 				    MCLBYTES);
   6190 				break;
   6191 			}
   6192 		} else
   6193 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6194 	}
   6195 
   6196 	/* Enable ECC */
   6197 	switch (sc->sc_type) {
   6198 	case WM_T_82571:
   6199 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6200 		reg |= PBA_ECC_CORR_EN;
   6201 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6202 		break;
   6203 	case WM_T_PCH_LPT:
   6204 	case WM_T_PCH_SPT:
   6205 	case WM_T_PCH_CNP:
   6206 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6207 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6208 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6209 
   6210 		sc->sc_ctrl |= CTRL_MEHE;
   6211 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6212 		break;
   6213 	default:
   6214 		break;
   6215 	}
   6216 
   6217 	/*
   6218 	 * Set the receive filter.
   6219 	 *
   6220 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6221 	 * the setting of RCTL.EN in wm_set_filter()
   6222 	 */
   6223 	wm_set_filter(sc);
   6224 
    6225 	/* On 82575 and later, set RDT only if RX is enabled */
   6226 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6227 		int qidx;
   6228 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6229 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6230 			for (i = 0; i < WM_NRXDESC; i++) {
   6231 				mutex_enter(rxq->rxq_lock);
   6232 				wm_init_rxdesc(rxq, i);
   6233 				mutex_exit(rxq->rxq_lock);
   6235 			}
   6236 		}
   6237 	}
   6238 
   6239 	wm_unset_stopping_flags(sc);
   6240 
   6241 	/* Start the one second link check clock. */
   6242 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6243 
   6244 	/* ...all done! */
   6245 	ifp->if_flags |= IFF_RUNNING;
   6246 	ifp->if_flags &= ~IFF_OACTIVE;
   6247 
   6248  out:
   6249 	/* Save last flags for the callback */
   6250 	sc->sc_if_flags = ifp->if_flags;
   6251 	sc->sc_ec_capenable = ec->ec_capenable;
   6252 	if (error)
   6253 		log(LOG_ERR, "%s: interface not running\n",
   6254 		    device_xname(sc->sc_dev));
   6255 	return error;
   6256 }
   6257 
   6258 /*
   6259  * wm_stop:		[ifnet interface function]
   6260  *
   6261  *	Stop transmission on the interface.
   6262  */
   6263 static void
   6264 wm_stop(struct ifnet *ifp, int disable)
   6265 {
   6266 	struct wm_softc *sc = ifp->if_softc;
   6267 
   6268 	WM_CORE_LOCK(sc);
   6269 	wm_stop_locked(ifp, disable);
   6270 	WM_CORE_UNLOCK(sc);
   6271 }
   6272 
   6273 static void
   6274 wm_stop_locked(struct ifnet *ifp, int disable)
   6275 {
   6276 	struct wm_softc *sc = ifp->if_softc;
   6277 	struct wm_txsoft *txs;
   6278 	int i, qidx;
   6279 
   6280 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6281 		device_xname(sc->sc_dev), __func__));
   6282 	KASSERT(WM_CORE_LOCKED(sc));
   6283 
   6284 	wm_set_stopping_flags(sc);
   6285 
   6286 	/* Stop the one second clock. */
   6287 	callout_stop(&sc->sc_tick_ch);
   6288 
   6289 	/* Stop the 82547 Tx FIFO stall check timer. */
   6290 	if (sc->sc_type == WM_T_82547)
   6291 		callout_stop(&sc->sc_txfifo_ch);
   6292 
   6293 	if (sc->sc_flags & WM_F_HAS_MII) {
   6294 		/* Down the MII. */
   6295 		mii_down(&sc->sc_mii);
   6296 	} else {
   6297 #if 0
   6298 		/* Should we clear PHY's status properly? */
   6299 		wm_reset(sc);
   6300 #endif
   6301 	}
   6302 
   6303 	/* Stop the transmit and receive processes. */
   6304 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6305 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6306 	sc->sc_rctl &= ~RCTL_EN;
   6307 
   6308 	/*
   6309 	 * Clear the interrupt mask to ensure the device cannot assert its
   6310 	 * interrupt line.
   6311 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6312 	 * service any currently pending or shared interrupt.
   6313 	 */
   6314 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6315 	sc->sc_icr = 0;
   6316 	if (wm_is_using_msix(sc)) {
   6317 		if (sc->sc_type != WM_T_82574) {
   6318 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6319 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6320 		} else
   6321 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6322 	}
   6323 
   6324 	/* Release any queued transmit buffers. */
   6325 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6326 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6327 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6328 		mutex_enter(txq->txq_lock);
   6329 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6330 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6331 			txs = &txq->txq_soft[i];
   6332 			if (txs->txs_mbuf != NULL) {
    6333 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6334 				m_freem(txs->txs_mbuf);
   6335 				txs->txs_mbuf = NULL;
   6336 			}
   6337 		}
   6338 		mutex_exit(txq->txq_lock);
   6339 	}
   6340 
   6341 	/* Mark the interface as down and cancel the watchdog timer. */
   6342 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6343 
   6344 	if (disable) {
   6345 		for (i = 0; i < sc->sc_nqueues; i++) {
   6346 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6347 			mutex_enter(rxq->rxq_lock);
   6348 			wm_rxdrain(rxq);
   6349 			mutex_exit(rxq->rxq_lock);
   6350 		}
   6351 	}
   6352 
   6353 #if 0 /* notyet */
   6354 	if (sc->sc_type >= WM_T_82544)
   6355 		CSR_WRITE(sc, WMREG_WUC, 0);
   6356 #endif
   6357 }
   6358 
   6359 static void
   6360 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6361 {
   6362 	struct mbuf *m;
   6363 	int i;
   6364 
   6365 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6366 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6367 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6368 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6369 		    m->m_data, m->m_len, m->m_flags);
   6370 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6371 	    i, i == 1 ? "" : "s");
   6372 }
   6373 
   6374 /*
   6375  * wm_82547_txfifo_stall:
   6376  *
   6377  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6378  *	reset the FIFO pointers, and restart packet transmission.
   6379  */
   6380 static void
   6381 wm_82547_txfifo_stall(void *arg)
   6382 {
   6383 	struct wm_softc *sc = arg;
   6384 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6385 
   6386 	mutex_enter(txq->txq_lock);
   6387 
   6388 	if (txq->txq_stopping)
   6389 		goto out;
   6390 
   6391 	if (txq->txq_fifo_stall) {
   6392 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6393 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6394 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6395 			/*
   6396 			 * Packets have drained.  Stop transmitter, reset
   6397 			 * FIFO pointers, restart transmitter, and kick
   6398 			 * the packet queue.
   6399 			 */
   6400 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6401 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6402 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6403 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6404 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6405 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6406 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6407 			CSR_WRITE_FLUSH(sc);
   6408 
   6409 			txq->txq_fifo_head = 0;
   6410 			txq->txq_fifo_stall = 0;
   6411 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6412 		} else {
   6413 			/*
   6414 			 * Still waiting for packets to drain; try again in
   6415 			 * another tick.
   6416 			 */
   6417 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6418 		}
   6419 	}
   6420 
   6421 out:
   6422 	mutex_exit(txq->txq_lock);
   6423 }
   6424 
   6425 /*
   6426  * wm_82547_txfifo_bugchk:
   6427  *
   6428  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6429  *	prevent enqueueing a packet that would wrap around the end
    6430  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6431  *
   6432  *	We do this by checking the amount of space before the end
   6433  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6434  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6435  *	the internal FIFO pointers to the beginning, and restart
   6436  *	transmission on the interface.
   6437  */
   6438 #define	WM_FIFO_HDR		0x10
   6439 #define	WM_82547_PAD_LEN	0x3e0
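         /*
          * FIFO space is accounted in WM_FIFO_HDR (16-byte) units: a packet
          * of length len consumes roundup(len + WM_FIFO_HDR, WM_FIFO_HDR)
          * bytes, e.g. a 60-byte frame consumes 0x50 bytes.
          */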
   6440 static int
   6441 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6442 {
   6443 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6444 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6445 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6446 
   6447 	/* Just return if already stalled. */
   6448 	if (txq->txq_fifo_stall)
   6449 		return 1;
   6450 
   6451 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6452 		/* Stall only occurs in half-duplex mode. */
   6453 		goto send_packet;
   6454 	}
   6455 
   6456 	if (len >= WM_82547_PAD_LEN + space) {
   6457 		txq->txq_fifo_stall = 1;
   6458 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6459 		return 1;
   6460 	}
   6461 
   6462  send_packet:
   6463 	txq->txq_fifo_head += len;
   6464 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6465 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6466 
   6467 	return 0;
   6468 }
   6469 
   6470 static int
   6471 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6472 {
   6473 	int error;
   6474 
   6475 	/*
   6476 	 * Allocate the control data structures, and create and load the
   6477 	 * DMA map for it.
   6478 	 *
   6479 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6480 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6481 	 * both sets within the same 4G segment.
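         	 * The 0x100000000ULL boundary argument passed to
         	 * bus_dmamem_alloc() below is what enforces this constraint.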
   6482 	 */
   6483 	if (sc->sc_type < WM_T_82544)
   6484 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6485 	else
   6486 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6487 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6488 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6489 	else
   6490 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6491 
   6492 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6493 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6494 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6495 		aprint_error_dev(sc->sc_dev,
   6496 		    "unable to allocate TX control data, error = %d\n",
   6497 		    error);
   6498 		goto fail_0;
   6499 	}
   6500 
   6501 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6502 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6503 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6504 		aprint_error_dev(sc->sc_dev,
   6505 		    "unable to map TX control data, error = %d\n", error);
   6506 		goto fail_1;
   6507 	}
   6508 
   6509 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6510 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6511 		aprint_error_dev(sc->sc_dev,
   6512 		    "unable to create TX control data DMA map, error = %d\n",
   6513 		    error);
   6514 		goto fail_2;
   6515 	}
   6516 
   6517 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6518 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6519 		aprint_error_dev(sc->sc_dev,
   6520 		    "unable to load TX control data DMA map, error = %d\n",
   6521 		    error);
   6522 		goto fail_3;
   6523 	}
   6524 
   6525 	return 0;
   6526 
   6527  fail_3:
   6528 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6529  fail_2:
   6530 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6531 	    WM_TXDESCS_SIZE(txq));
   6532  fail_1:
   6533 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6534  fail_0:
   6535 	return error;
   6536 }
   6537 
   6538 static void
   6539 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6540 {
   6541 
   6542 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6543 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6544 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6545 	    WM_TXDESCS_SIZE(txq));
   6546 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6547 }
   6548 
   6549 static int
   6550 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6551 {
   6552 	int error;
   6553 	size_t rxq_descs_size;
   6554 
   6555 	/*
   6556 	 * Allocate the control data structures, and create and load the
   6557 	 * DMA map for it.
   6558 	 *
   6559 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6560 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6561 	 * both sets within the same 4G segment.
   6562 	 */
   6563 	rxq->rxq_ndesc = WM_NRXDESC;
   6564 	if (sc->sc_type == WM_T_82574)
   6565 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6566 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6567 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6568 	else
   6569 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6570 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6571 
   6572 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6573 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6574 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6575 		aprint_error_dev(sc->sc_dev,
   6576 		    "unable to allocate RX control data, error = %d\n",
   6577 		    error);
   6578 		goto fail_0;
   6579 	}
   6580 
   6581 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6582 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6583 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6584 		aprint_error_dev(sc->sc_dev,
   6585 		    "unable to map RX control data, error = %d\n", error);
   6586 		goto fail_1;
   6587 	}
   6588 
   6589 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6590 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6591 		aprint_error_dev(sc->sc_dev,
   6592 		    "unable to create RX control data DMA map, error = %d\n",
   6593 		    error);
   6594 		goto fail_2;
   6595 	}
   6596 
   6597 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6598 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6599 		aprint_error_dev(sc->sc_dev,
   6600 		    "unable to load RX control data DMA map, error = %d\n",
   6601 		    error);
   6602 		goto fail_3;
   6603 	}
   6604 
   6605 	return 0;
   6606 
   6607  fail_3:
   6608 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6609  fail_2:
   6610 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6611 	    rxq_descs_size);
   6612  fail_1:
   6613 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6614  fail_0:
   6615 	return error;
   6616 }
   6617 
   6618 static void
   6619 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6620 {
   6621 
   6622 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6623 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6624 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6625 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6626 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6627 }
   6628 
   6629 
   6630 static int
   6631 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6632 {
   6633 	int i, error;
   6634 
   6635 	/* Create the transmit buffer DMA maps. */
   6636 	WM_TXQUEUELEN(txq) =
   6637 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6638 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6639 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6640 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6641 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6642 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6643 			aprint_error_dev(sc->sc_dev,
   6644 			    "unable to create Tx DMA map %d, error = %d\n",
   6645 			    i, error);
   6646 			goto fail;
   6647 		}
   6648 	}
   6649 
   6650 	return 0;
   6651 
   6652  fail:
   6653 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6654 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6655 			bus_dmamap_destroy(sc->sc_dmat,
   6656 			    txq->txq_soft[i].txs_dmamap);
   6657 	}
   6658 	return error;
   6659 }
   6660 
   6661 static void
   6662 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6663 {
   6664 	int i;
   6665 
   6666 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6667 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6668 			bus_dmamap_destroy(sc->sc_dmat,
   6669 			    txq->txq_soft[i].txs_dmamap);
   6670 	}
   6671 }
   6672 
   6673 static int
   6674 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6675 {
   6676 	int i, error;
   6677 
   6678 	/* Create the receive buffer DMA maps. */
   6679 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6680 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6681 			    MCLBYTES, 0, 0,
   6682 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6683 			aprint_error_dev(sc->sc_dev,
   6684 			    "unable to create Rx DMA map %d error = %d\n",
   6685 			    i, error);
   6686 			goto fail;
   6687 		}
   6688 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6689 	}
   6690 
   6691 	return 0;
   6692 
   6693  fail:
   6694 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6695 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6696 			bus_dmamap_destroy(sc->sc_dmat,
   6697 			    rxq->rxq_soft[i].rxs_dmamap);
   6698 	}
   6699 	return error;
   6700 }
   6701 
   6702 static void
   6703 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6704 {
   6705 	int i;
   6706 
   6707 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6708 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6709 			bus_dmamap_destroy(sc->sc_dmat,
   6710 			    rxq->rxq_soft[i].rxs_dmamap);
   6711 	}
   6712 }
   6713 
   6714 /*
    6715  * wm_alloc_txrx_queues:
   6716  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6717  */
   6718 static int
   6719 wm_alloc_txrx_queues(struct wm_softc *sc)
   6720 {
   6721 	int i, error, tx_done, rx_done;
   6722 
   6723 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6724 	    KM_SLEEP);
   6725 	if (sc->sc_queue == NULL) {
    6726 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6727 		error = ENOMEM;
   6728 		goto fail_0;
   6729 	}
   6730 
   6731 	/* For transmission */
   6732 	error = 0;
   6733 	tx_done = 0;
   6734 	for (i = 0; i < sc->sc_nqueues; i++) {
   6735 #ifdef WM_EVENT_COUNTERS
   6736 		int j;
   6737 		const char *xname;
   6738 #endif
   6739 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6740 		txq->txq_sc = sc;
   6741 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6742 
   6743 		error = wm_alloc_tx_descs(sc, txq);
   6744 		if (error)
   6745 			break;
   6746 		error = wm_alloc_tx_buffer(sc, txq);
   6747 		if (error) {
   6748 			wm_free_tx_descs(sc, txq);
   6749 			break;
   6750 		}
   6751 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6752 		if (txq->txq_interq == NULL) {
   6753 			wm_free_tx_descs(sc, txq);
   6754 			wm_free_tx_buffer(sc, txq);
   6755 			error = ENOMEM;
   6756 			break;
   6757 		}
   6758 
   6759 #ifdef WM_EVENT_COUNTERS
   6760 		xname = device_xname(sc->sc_dev);
   6761 
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6766 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6772 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6773 
   6774 		for (j = 0; j < WM_NTXSEGS; j++) {
   6775 			snprintf(txq->txq_txseg_evcnt_names[j],
   6776 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6777 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6778 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6779 		}
   6780 
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6785 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6786 #endif /* WM_EVENT_COUNTERS */
   6787 
   6788 		tx_done++;
   6789 	}
   6790 	if (error)
   6791 		goto fail_1;
   6792 
   6793 	/* For receive */
   6794 	error = 0;
   6795 	rx_done = 0;
   6796 	for (i = 0; i < sc->sc_nqueues; i++) {
   6797 #ifdef WM_EVENT_COUNTERS
   6798 		const char *xname;
   6799 #endif
   6800 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6801 		rxq->rxq_sc = sc;
   6802 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6803 
   6804 		error = wm_alloc_rx_descs(sc, rxq);
   6805 		if (error)
   6806 			break;
   6807 
   6808 		error = wm_alloc_rx_buffer(sc, rxq);
   6809 		if (error) {
   6810 			wm_free_rx_descs(sc, rxq);
   6811 			break;
   6812 		}
   6813 
   6814 #ifdef WM_EVENT_COUNTERS
   6815 		xname = device_xname(sc->sc_dev);
   6816 
   6817 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6818 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6819 
   6820 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6821 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6822 #endif /* WM_EVENT_COUNTERS */
   6823 
   6824 		rx_done++;
   6825 	}
   6826 	if (error)
   6827 		goto fail_2;
   6828 
   6829 	for (i = 0; i < sc->sc_nqueues; i++) {
   6830 		char rndname[16];
   6831 
   6832 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6833 		    device_xname(sc->sc_dev), i);
   6834 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6835 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6836 	}
   6837 
   6838 	return 0;
   6839 
   6840  fail_2:
   6841 	for (i = 0; i < rx_done; i++) {
   6842 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6843 		wm_free_rx_buffer(sc, rxq);
   6844 		wm_free_rx_descs(sc, rxq);
   6845 		if (rxq->rxq_lock)
   6846 			mutex_obj_free(rxq->rxq_lock);
   6847 	}
   6848  fail_1:
   6849 	for (i = 0; i < tx_done; i++) {
   6850 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6851 		pcq_destroy(txq->txq_interq);
   6852 		wm_free_tx_buffer(sc, txq);
   6853 		wm_free_tx_descs(sc, txq);
   6854 		if (txq->txq_lock)
   6855 			mutex_obj_free(txq->txq_lock);
   6856 	}
   6857 
   6858 	kmem_free(sc->sc_queue,
   6859 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6860  fail_0:
   6861 	return error;
   6862 }
   6863 
   6864 /*
    6865  * wm_free_txrx_queues:
   6866  *	Free {tx,rx}descs and {tx,rx} buffers
   6867  */
   6868 static void
   6869 wm_free_txrx_queues(struct wm_softc *sc)
   6870 {
   6871 	int i;
   6872 
   6873 	for (i = 0; i < sc->sc_nqueues; i++)
   6874 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   6875 
   6876 	for (i = 0; i < sc->sc_nqueues; i++) {
   6877 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6878 
   6879 #ifdef WM_EVENT_COUNTERS
   6880 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6881 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6882 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6883 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6884 #endif /* WM_EVENT_COUNTERS */
   6885 
   6886 		wm_free_rx_buffer(sc, rxq);
   6887 		wm_free_rx_descs(sc, rxq);
   6888 		if (rxq->rxq_lock)
   6889 			mutex_obj_free(rxq->rxq_lock);
   6890 	}
   6891 
   6892 	for (i = 0; i < sc->sc_nqueues; i++) {
   6893 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6894 		struct mbuf *m;
   6895 #ifdef WM_EVENT_COUNTERS
   6896 		int j;
   6897 
   6898 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6899 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6900 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6907 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6908 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6909 
   6910 		for (j = 0; j < WM_NTXSEGS; j++)
   6911 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6912 
   6913 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6914 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6915 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6916 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6917 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6918 #endif /* WM_EVENT_COUNTERS */
   6919 
   6920 		/* Drain txq_interq */
   6921 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6922 			m_freem(m);
   6923 		pcq_destroy(txq->txq_interq);
   6924 
   6925 		wm_free_tx_buffer(sc, txq);
   6926 		wm_free_tx_descs(sc, txq);
   6927 		if (txq->txq_lock)
   6928 			mutex_obj_free(txq->txq_lock);
   6929 	}
   6930 
   6931 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6932 }
   6933 
   6934 static void
   6935 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6936 {
   6937 
   6938 	KASSERT(mutex_owned(txq->txq_lock));
   6939 
   6940 	/* Initialize the transmit descriptor ring. */
   6941 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6942 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6943 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6944 	txq->txq_free = WM_NTXDESC(txq);
   6945 	txq->txq_next = 0;
   6946 }
   6947 
   6948 static void
   6949 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6950     struct wm_txqueue *txq)
   6951 {
   6952 
   6953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6954 		device_xname(sc->sc_dev), __func__));
   6955 	KASSERT(mutex_owned(txq->txq_lock));
   6956 
   6957 	if (sc->sc_type < WM_T_82543) {
   6958 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6959 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6960 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6961 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6962 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6963 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6964 	} else {
   6965 		int qid = wmq->wmq_id;
   6966 
   6967 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6968 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6969 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6970 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6971 
   6972 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6973 			/*
   6974 			 * Don't write TDT before TCTL.EN is set.
    6975 			 * See the datasheet.
   6976 			 */
   6977 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6978 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6979 			    | TXDCTL_WTHRESH(0));
   6980 		else {
   6981 			/* XXX should update with AIM? */
   6982 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6983 			if (sc->sc_type >= WM_T_82540) {
   6984 				/* Should be the same */
   6985 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6986 			}
   6987 
   6988 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6989 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6990 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6991 		}
   6992 	}
   6993 }
   6994 
   6995 static void
   6996 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6997 {
   6998 	int i;
   6999 
   7000 	KASSERT(mutex_owned(txq->txq_lock));
   7001 
   7002 	/* Initialize the transmit job descriptors. */
   7003 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7004 		txq->txq_soft[i].txs_mbuf = NULL;
   7005 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7006 	txq->txq_snext = 0;
   7007 	txq->txq_sdirty = 0;
   7008 }
   7009 
   7010 static void
   7011 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7012     struct wm_txqueue *txq)
   7013 {
   7014 
   7015 	KASSERT(mutex_owned(txq->txq_lock));
   7016 
   7017 	/*
   7018 	 * Set up some register offsets that are different between
   7019 	 * the i82542 and the i82543 and later chips.
   7020 	 */
   7021 	if (sc->sc_type < WM_T_82543)
   7022 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7023 	else
   7024 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7025 
   7026 	wm_init_tx_descs(sc, txq);
   7027 	wm_init_tx_regs(sc, wmq, txq);
   7028 	wm_init_tx_buffer(sc, txq);
   7029 
   7030 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7031 	txq->txq_sending = false;
   7032 }
   7033 
   7034 static void
   7035 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7036     struct wm_rxqueue *rxq)
   7037 {
   7038 
   7039 	KASSERT(mutex_owned(rxq->rxq_lock));
   7040 
   7041 	/*
   7042 	 * Initialize the receive descriptor and receive job
   7043 	 * descriptor rings.
   7044 	 */
   7045 	if (sc->sc_type < WM_T_82543) {
   7046 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7047 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7048 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7049 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7050 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7051 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7052 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7053 
   7054 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7055 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7056 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7057 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7058 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7059 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7060 	} else {
   7061 		int qid = wmq->wmq_id;
   7062 
   7063 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7064 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7065 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7066 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7067 
   7068 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7069 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7070 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
   7071 
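         			/*
         			 * SRRCTL's packet buffer size field is in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
         			 * divisibility check above and the shift below.
         			 */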
    7072 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7073 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7074 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7075 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7076 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7077 			    | RXDCTL_WTHRESH(1));
   7078 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7079 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7080 		} else {
   7081 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7082 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7083 			/* XXX should update with AIM? */
   7084 			CSR_WRITE(sc, WMREG_RDTR,
   7085 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7086 			/* MUST be same */
   7087 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7088 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7089 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7090 		}
   7091 	}
   7092 }
   7093 
   7094 static int
   7095 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7096 {
   7097 	struct wm_rxsoft *rxs;
   7098 	int error, i;
   7099 
   7100 	KASSERT(mutex_owned(rxq->rxq_lock));
   7101 
   7102 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7103 		rxs = &rxq->rxq_soft[i];
   7104 		if (rxs->rxs_mbuf == NULL) {
   7105 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7106 				log(LOG_ERR, "%s: unable to allocate or map "
   7107 				    "rx buffer %d, error = %d\n",
   7108 				    device_xname(sc->sc_dev), i, error);
   7109 				/*
   7110 				 * XXX Should attempt to run with fewer receive
   7111 				 * XXX buffers instead of just failing.
   7112 				 */
   7113 				wm_rxdrain(rxq);
   7114 				return ENOMEM;
   7115 			}
   7116 		} else {
   7117 			/*
   7118 			 * For 82575 and 82576, the RX descriptors must be
   7119 			 * initialized after the setting of RCTL.EN in
   7120 			 * wm_set_filter()
   7121 			 */
   7122 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7123 				wm_init_rxdesc(rxq, i);
   7124 		}
   7125 	}
   7126 	rxq->rxq_ptr = 0;
   7127 	rxq->rxq_discard = 0;
   7128 	WM_RXCHAIN_RESET(rxq);
   7129 
   7130 	return 0;
   7131 }
   7132 
   7133 static int
   7134 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7135     struct wm_rxqueue *rxq)
   7136 {
   7137 
   7138 	KASSERT(mutex_owned(rxq->rxq_lock));
   7139 
   7140 	/*
   7141 	 * Set up some register offsets that are different between
   7142 	 * the i82542 and the i82543 and later chips.
   7143 	 */
   7144 	if (sc->sc_type < WM_T_82543)
   7145 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7146 	else
   7147 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7148 
   7149 	wm_init_rx_regs(sc, wmq, rxq);
   7150 	return wm_init_rx_buffer(sc, rxq);
   7151 }
   7152 
   7153 /*
    7154  * wm_init_txrx_queues:
   7155  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7156  */
   7157 static int
   7158 wm_init_txrx_queues(struct wm_softc *sc)
   7159 {
   7160 	int i, error = 0;
   7161 
   7162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7163 		device_xname(sc->sc_dev), __func__));
   7164 
   7165 	for (i = 0; i < sc->sc_nqueues; i++) {
   7166 		struct wm_queue *wmq = &sc->sc_queue[i];
   7167 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7168 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7169 
    7170 		/*
    7171 		 * TODO
    7172 		 * Currently, use a constant ITR value instead of AIM.
    7173 		 * Furthermore, the interrupt interval of a multiqueue
    7174 		 * configuration (which uses polling mode) is shorter than
    7175 		 * the default value. More tuning and AIM are required.
    7176 		 */
   7177 		if (wm_is_using_multiqueue(sc))
   7178 			wmq->wmq_itr = 50;
   7179 		else
   7180 			wmq->wmq_itr = sc->sc_itr_init;
   7181 		wmq->wmq_set_itr = true;
   7182 
   7183 		mutex_enter(txq->txq_lock);
   7184 		wm_init_tx_queue(sc, wmq, txq);
   7185 		mutex_exit(txq->txq_lock);
   7186 
   7187 		mutex_enter(rxq->rxq_lock);
   7188 		error = wm_init_rx_queue(sc, wmq, rxq);
   7189 		mutex_exit(rxq->rxq_lock);
   7190 		if (error)
   7191 			break;
   7192 	}
   7193 
   7194 	return error;
   7195 }
   7196 
   7197 /*
   7198  * wm_tx_offload:
   7199  *
   7200  *	Set up TCP/IP checksumming parameters for the
   7201  *	specified packet.
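          *
          *	In the Livengood context descriptor filled in below, IPCSS
          *	and TUCSS give the offset where checksumming starts, IPCSO
          *	and TUCSO the offset at which the computed checksum is
          *	stored, and IPCSE/TUCSE the offset where it ends (0 means
          *	the end of the packet).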
   7202  */
   7203 static int
   7204 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7205     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7206 {
   7207 	struct mbuf *m0 = txs->txs_mbuf;
   7208 	struct livengood_tcpip_ctxdesc *t;
   7209 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7210 	uint32_t ipcse;
   7211 	struct ether_header *eh;
   7212 	int offset, iphl;
   7213 	uint8_t fields;
   7214 
   7215 	/*
   7216 	 * XXX It would be nice if the mbuf pkthdr had offset
   7217 	 * fields for the protocol headers.
   7218 	 */
   7219 
   7220 	eh = mtod(m0, struct ether_header *);
   7221 	switch (htons(eh->ether_type)) {
   7222 	case ETHERTYPE_IP:
   7223 	case ETHERTYPE_IPV6:
   7224 		offset = ETHER_HDR_LEN;
   7225 		break;
   7226 
   7227 	case ETHERTYPE_VLAN:
   7228 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7229 		break;
   7230 
   7231 	default:
   7232 		/* Don't support this protocol or encapsulation. */
   7233 		*fieldsp = 0;
   7234 		*cmdp = 0;
   7235 		return 0;
   7236 	}
   7237 
   7238 	if ((m0->m_pkthdr.csum_flags &
   7239 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7240 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7241 	} else
   7242 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7243 
   7244 	ipcse = offset + iphl - 1;
   7245 
   7246 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7247 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7248 	seg = 0;
   7249 	fields = 0;
   7250 
   7251 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7252 		int hlen = offset + iphl;
   7253 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7254 
   7255 		if (__predict_false(m0->m_len <
   7256 				    (hlen + sizeof(struct tcphdr)))) {
   7257 			/*
   7258 			 * TCP/IP headers are not in the first mbuf; we need
   7259 			 * to do this the slow and painful way. Let's just
   7260 			 * hope this doesn't happen very often.
   7261 			 */
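         			/*
         			 * For TSO, the IP total-length field is zeroed
         			 * and th_sum is seeded with the pseudo-header
         			 * checksum (addresses and protocol only); the
         			 * hardware fills in the per-segment lengths and
         			 * final checksums.
         			 */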
   7262 			struct tcphdr th;
   7263 
   7264 			WM_Q_EVCNT_INCR(txq, tsopain);
   7265 
   7266 			m_copydata(m0, hlen, sizeof(th), &th);
   7267 			if (v4) {
   7268 				struct ip ip;
   7269 
   7270 				m_copydata(m0, offset, sizeof(ip), &ip);
   7271 				ip.ip_len = 0;
   7272 				m_copyback(m0,
   7273 				    offset + offsetof(struct ip, ip_len),
   7274 				    sizeof(ip.ip_len), &ip.ip_len);
   7275 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7276 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7277 			} else {
   7278 				struct ip6_hdr ip6;
   7279 
   7280 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7281 				ip6.ip6_plen = 0;
   7282 				m_copyback(m0,
   7283 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7284 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7285 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7286 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7287 			}
   7288 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7289 			    sizeof(th.th_sum), &th.th_sum);
   7290 
   7291 			hlen += th.th_off << 2;
   7292 		} else {
   7293 			/*
   7294 			 * TCP/IP headers are in the first mbuf; we can do
   7295 			 * this the easy way.
   7296 			 */
   7297 			struct tcphdr *th;
   7298 
   7299 			if (v4) {
   7300 				struct ip *ip =
   7301 				    (void *)(mtod(m0, char *) + offset);
   7302 				th = (void *)(mtod(m0, char *) + hlen);
   7303 
   7304 				ip->ip_len = 0;
   7305 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7306 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7307 			} else {
   7308 				struct ip6_hdr *ip6 =
   7309 				    (void *)(mtod(m0, char *) + offset);
   7310 				th = (void *)(mtod(m0, char *) + hlen);
   7311 
   7312 				ip6->ip6_plen = 0;
   7313 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7314 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7315 			}
   7316 			hlen += th->th_off << 2;
   7317 		}
   7318 
   7319 		if (v4) {
   7320 			WM_Q_EVCNT_INCR(txq, tso);
   7321 			cmdlen |= WTX_TCPIP_CMD_IP;
   7322 		} else {
   7323 			WM_Q_EVCNT_INCR(txq, tso6);
   7324 			ipcse = 0;
   7325 		}
   7326 		cmd |= WTX_TCPIP_CMD_TSE;
   7327 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7328 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7329 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7330 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7331 	}
   7332 
   7333 	/*
   7334 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7335 	 * offload feature, if we load the context descriptor, we
   7336 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7337 	 */
   7338 
   7339 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7340 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7341 	    WTX_TCPIP_IPCSE(ipcse);
   7342 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7343 		WM_Q_EVCNT_INCR(txq, ipsum);
   7344 		fields |= WTX_IXSM;
   7345 	}
   7346 
   7347 	offset += iphl;
   7348 
   7349 	if (m0->m_pkthdr.csum_flags &
   7350 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7351 		WM_Q_EVCNT_INCR(txq, tusum);
   7352 		fields |= WTX_TXSM;
   7353 		tucs = WTX_TCPIP_TUCSS(offset) |
   7354 		    WTX_TCPIP_TUCSO(offset +
   7355 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7356 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7357 	} else if ((m0->m_pkthdr.csum_flags &
   7358 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7359 		WM_Q_EVCNT_INCR(txq, tusum6);
   7360 		fields |= WTX_TXSM;
   7361 		tucs = WTX_TCPIP_TUCSS(offset) |
   7362 		    WTX_TCPIP_TUCSO(offset +
   7363 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7364 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7365 	} else {
   7366 		/* Just initialize it to a valid TCP context. */
   7367 		tucs = WTX_TCPIP_TUCSS(offset) |
   7368 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7369 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7370 	}
   7371 
    7372 	/*
    7373 	 * We don't have to write a context descriptor for every packet,
    7374 	 * except on the 82574: there, a context descriptor must be
    7375 	 * written for every packet when two descriptor queues are used.
    7376 	 * Writing a context descriptor for every packet adds overhead,
    7377 	 * but it does not cause problems.
    7378 	 */
   7379 	/* Fill in the context descriptor. */
   7380 	t = (struct livengood_tcpip_ctxdesc *)
   7381 	    &txq->txq_descs[txq->txq_next];
   7382 	t->tcpip_ipcs = htole32(ipcs);
   7383 	t->tcpip_tucs = htole32(tucs);
   7384 	t->tcpip_cmdlen = htole32(cmdlen);
   7385 	t->tcpip_seg = htole32(seg);
   7386 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7387 
   7388 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7389 	txs->txs_ndesc++;
   7390 
   7391 	*cmdp = cmd;
   7392 	*fieldsp = fields;
   7393 
   7394 	return 0;
   7395 }
   7396 
   7397 static inline int
   7398 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7399 {
   7400 	struct wm_softc *sc = ifp->if_softc;
   7401 	u_int cpuid = cpu_index(curcpu());
   7402 
    7403 	/*
    7404 	 * Currently, a simple distribution strategy.
    7405 	 * TODO:
    7406 	 * Distribute by flowid (RSS hash value).
    7407 	 */
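         	/*
         	 * E.g. with ncpu = 8, sc_affinity_offset = 2 and 4 queues,
         	 * CPU 3 maps to ((3 + 8 - 2) % 8) % 4 = 1.
         	 */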
   7408 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7409 }
   7410 
   7411 /*
   7412  * wm_start:		[ifnet interface function]
   7413  *
   7414  *	Start packet transmission on the interface.
   7415  */
   7416 static void
   7417 wm_start(struct ifnet *ifp)
   7418 {
   7419 	struct wm_softc *sc = ifp->if_softc;
   7420 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7421 
   7422 #ifdef WM_MPSAFE
   7423 	KASSERT(if_is_mpsafe(ifp));
   7424 #endif
   7425 	/*
   7426 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7427 	 */
   7428 
   7429 	mutex_enter(txq->txq_lock);
   7430 	if (!txq->txq_stopping)
   7431 		wm_start_locked(ifp);
   7432 	mutex_exit(txq->txq_lock);
   7433 }
   7434 
   7435 static void
   7436 wm_start_locked(struct ifnet *ifp)
   7437 {
   7438 	struct wm_softc *sc = ifp->if_softc;
   7439 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7440 
   7441 	wm_send_common_locked(ifp, txq, false);
   7442 }
   7443 
   7444 static int
   7445 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7446 {
   7447 	int qid;
   7448 	struct wm_softc *sc = ifp->if_softc;
   7449 	struct wm_txqueue *txq;
   7450 
   7451 	qid = wm_select_txqueue(ifp, m);
   7452 	txq = &sc->sc_queue[qid].wmq_txq;
   7453 
   7454 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7455 		m_freem(m);
   7456 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7457 		return ENOBUFS;
   7458 	}
   7459 
   7460 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7461 	ifp->if_obytes += m->m_pkthdr.len;
   7462 	if (m->m_flags & M_MCAST)
   7463 		ifp->if_omcasts++;
   7464 
   7465 	if (mutex_tryenter(txq->txq_lock)) {
   7466 		if (!txq->txq_stopping)
   7467 			wm_transmit_locked(ifp, txq);
   7468 		mutex_exit(txq->txq_lock);
   7469 	}
   7470 
   7471 	return 0;
   7472 }
   7473 
   7474 static void
   7475 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7476 {
   7477 
   7478 	wm_send_common_locked(ifp, txq, true);
   7479 }
   7480 
   7481 static void
   7482 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7483     bool is_transmit)
   7484 {
   7485 	struct wm_softc *sc = ifp->if_softc;
   7486 	struct mbuf *m0;
   7487 	struct wm_txsoft *txs;
   7488 	bus_dmamap_t dmamap;
   7489 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7490 	bus_addr_t curaddr;
   7491 	bus_size_t seglen, curlen;
   7492 	uint32_t cksumcmd;
   7493 	uint8_t cksumfields;
   7494 	bool remap = true;
   7495 
   7496 	KASSERT(mutex_owned(txq->txq_lock));
   7497 
   7498 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7499 		return;
   7500 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7501 		return;
   7502 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7503 		return;
   7504 
   7505 	/* Remember the previous number of free descriptors. */
   7506 	ofree = txq->txq_free;
   7507 
   7508 	/*
   7509 	 * Loop through the send queue, setting up transmit descriptors
   7510 	 * until we drain the queue, or use up all available transmit
   7511 	 * descriptors.
   7512 	 */
   7513 	for (;;) {
   7514 		m0 = NULL;
   7515 
   7516 		/* Get a work queue entry. */
   7517 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7518 			wm_txeof(txq, UINT_MAX);
   7519 			if (txq->txq_sfree == 0) {
   7520 				DPRINTF(WM_DEBUG_TX,
   7521 				    ("%s: TX: no free job descriptors\n",
   7522 					device_xname(sc->sc_dev)));
   7523 				WM_Q_EVCNT_INCR(txq, txsstall);
   7524 				break;
   7525 			}
   7526 		}
   7527 
   7528 		/* Grab a packet off the queue. */
   7529 		if (is_transmit)
   7530 			m0 = pcq_get(txq->txq_interq);
   7531 		else
   7532 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7533 		if (m0 == NULL)
   7534 			break;
   7535 
   7536 		DPRINTF(WM_DEBUG_TX,
   7537 		    ("%s: TX: have packet to transmit: %p\n",
   7538 			device_xname(sc->sc_dev), m0));
   7539 
   7540 		txs = &txq->txq_soft[txq->txq_snext];
   7541 		dmamap = txs->txs_dmamap;
   7542 
   7543 		use_tso = (m0->m_pkthdr.csum_flags &
   7544 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7545 
   7546 		/*
   7547 		 * So says the Linux driver:
   7548 		 * The controller does a simple calculation to make sure
   7549 		 * there is enough room in the FIFO before initiating the
   7550 		 * DMA for each buffer. The calc is:
   7551 		 *	4 = ceil(buffer len / MSS)
   7552 		 * To make sure we don't overrun the FIFO, adjust the max
   7553 		 * buffer len if the MSS drops.
   7554 		 */
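         		/* E.g. an MSS of 1460 caps each segment at 5840 bytes. */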
   7555 		dmamap->dm_maxsegsz =
   7556 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7557 		    ? m0->m_pkthdr.segsz << 2
   7558 		    : WTX_MAX_LEN;
   7559 
   7560 		/*
   7561 		 * Load the DMA map.  If this fails, the packet either
   7562 		 * didn't fit in the allotted number of segments, or we
   7563 		 * were short on resources.  For the too-many-segments
   7564 		 * case, we simply report an error and drop the packet,
   7565 		 * since we can't sanely copy a jumbo packet to a single
   7566 		 * buffer.
   7567 		 */
   7568 retry:
   7569 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7570 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7571 		if (__predict_false(error)) {
   7572 			if (error == EFBIG) {
   7573 				if (remap == true) {
   7574 					struct mbuf *m;
   7575 
   7576 					remap = false;
   7577 					m = m_defrag(m0, M_NOWAIT);
   7578 					if (m != NULL) {
   7579 						WM_Q_EVCNT_INCR(txq, defrag);
   7580 						m0 = m;
   7581 						goto retry;
   7582 					}
   7583 				}
   7584 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7585 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7586 				    "DMA segments, dropping...\n",
   7587 				    device_xname(sc->sc_dev));
   7588 				wm_dump_mbuf_chain(sc, m0);
   7589 				m_freem(m0);
   7590 				continue;
   7591 			}
   7592 			/* Short on resources, just stop for now. */
   7593 			DPRINTF(WM_DEBUG_TX,
   7594 			    ("%s: TX: dmamap load failed: %d\n",
   7595 				device_xname(sc->sc_dev), error));
   7596 			break;
   7597 		}
   7598 
   7599 		segs_needed = dmamap->dm_nsegs;
   7600 		if (use_tso) {
   7601 			/* For sentinel descriptor; see below. */
   7602 			segs_needed++;
   7603 		}
   7604 
   7605 		/*
   7606 		 * Ensure we have enough descriptors free to describe
   7607 		 * the packet. Note, we always reserve one descriptor
   7608 		 * at the end of the ring due to the semantics of the
   7609 		 * TDT register, plus one more in the event we need
   7610 		 * to load offload context.
   7611 		 */
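         		/* Hence the comparison against txq_free - 2 below. */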
   7612 		if (segs_needed > txq->txq_free - 2) {
   7613 			/*
   7614 			 * Not enough free descriptors to transmit this
   7615 			 * packet.  We haven't committed anything yet,
   7616 			 * so just unload the DMA map, put the packet
    7617 			 * back on the queue, and punt. Notify the upper
   7618 			 * layer that there are no more slots left.
   7619 			 */
   7620 			DPRINTF(WM_DEBUG_TX,
   7621 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7622 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7623 				segs_needed, txq->txq_free - 1));
   7624 			if (!is_transmit)
   7625 				ifp->if_flags |= IFF_OACTIVE;
   7626 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7627 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7628 			WM_Q_EVCNT_INCR(txq, txdstall);
   7629 			break;
   7630 		}
   7631 
   7632 		/*
   7633 		 * Check for 82547 Tx FIFO bug. We need to do this
   7634 		 * once we know we can transmit the packet, since we
   7635 		 * do some internal FIFO space accounting here.
   7636 		 */
   7637 		if (sc->sc_type == WM_T_82547 &&
   7638 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7639 			DPRINTF(WM_DEBUG_TX,
   7640 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7641 				device_xname(sc->sc_dev)));
   7642 			if (!is_transmit)
   7643 				ifp->if_flags |= IFF_OACTIVE;
   7644 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7645 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7646 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7647 			break;
   7648 		}
   7649 
   7650 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7651 
   7652 		DPRINTF(WM_DEBUG_TX,
   7653 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7654 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7655 
   7656 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7657 
   7658 		/*
   7659 		 * Store a pointer to the packet so that we can free it
   7660 		 * later.
   7661 		 *
   7662 		 * Initially, we consider the number of descriptors the
   7663 		 * packet uses the number of DMA segments.  This may be
   7664 		 * incremented by 1 if we do checksum offload (a descriptor
   7665 		 * is used to set the checksum context).
   7666 		 */
   7667 		txs->txs_mbuf = m0;
   7668 		txs->txs_firstdesc = txq->txq_next;
   7669 		txs->txs_ndesc = segs_needed;
   7670 
   7671 		/* Set up offload parameters for this packet. */
   7672 		if (m0->m_pkthdr.csum_flags &
   7673 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7674 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7675 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7676 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7677 					  &cksumfields) != 0) {
   7678 				/* Error message already displayed. */
   7679 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7680 				continue;
   7681 			}
   7682 		} else {
   7683 			cksumcmd = 0;
   7684 			cksumfields = 0;
   7685 		}
   7686 
   7687 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7688 
   7689 		/* Sync the DMA map. */
   7690 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7691 		    BUS_DMASYNC_PREWRITE);
   7692 
   7693 		/* Initialize the transmit descriptor. */
   7694 		for (nexttx = txq->txq_next, seg = 0;
   7695 		     seg < dmamap->dm_nsegs; seg++) {
   7696 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7697 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7698 			     seglen != 0;
   7699 			     curaddr += curlen, seglen -= curlen,
   7700 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7701 				curlen = seglen;
   7702 
   7703 				/*
   7704 				 * So says the Linux driver:
   7705 				 * Work around for premature descriptor
   7706 				 * write-backs in TSO mode.  Append a
   7707 				 * 4-byte sentinel descriptor.
   7708 				 */
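         				/*
         				 * The 4 bytes trimmed below get their own
         				 * descriptor, the extra one counted in
         				 * segs_needed above.
         				 */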
   7709 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7710 				    curlen > 8)
   7711 					curlen -= 4;
   7712 
   7713 				wm_set_dma_addr(
   7714 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7715 				txq->txq_descs[nexttx].wtx_cmdlen
   7716 				    = htole32(cksumcmd | curlen);
   7717 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7718 				    = 0;
   7719 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7720 				    = cksumfields;
    7721 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7722 				lasttx = nexttx;
   7723 
   7724 				DPRINTF(WM_DEBUG_TX,
   7725 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7726 					"len %#04zx\n",
   7727 					device_xname(sc->sc_dev), nexttx,
   7728 					(uint64_t)curaddr, curlen));
   7729 			}
   7730 		}
   7731 
   7732 		KASSERT(lasttx != -1);
   7733 
   7734 		/*
   7735 		 * Set up the command byte on the last descriptor of
   7736 		 * the packet. If we're in the interrupt delay window,
   7737 		 * delay the interrupt.
   7738 		 */
   7739 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7740 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7741 
   7742 		/*
   7743 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7744 		 * up the descriptor to encapsulate the packet for us.
   7745 		 *
   7746 		 * This is only valid on the last descriptor of the packet.
   7747 		 */
   7748 		if (vlan_has_tag(m0)) {
   7749 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7750 			    htole32(WTX_CMD_VLE);
   7751 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7752 			    = htole16(vlan_get_tag(m0));
   7753 		}
   7754 
   7755 		txs->txs_lastdesc = lasttx;
   7756 
   7757 		DPRINTF(WM_DEBUG_TX,
   7758 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7759 			device_xname(sc->sc_dev),
   7760 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7761 
   7762 		/* Sync the descriptors we're using. */
   7763 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7764 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7765 
   7766 		/* Give the packet to the chip. */
   7767 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7768 
   7769 		DPRINTF(WM_DEBUG_TX,
   7770 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7771 
   7772 		DPRINTF(WM_DEBUG_TX,
   7773 		    ("%s: TX: finished transmitting packet, job %d\n",
   7774 			device_xname(sc->sc_dev), txq->txq_snext));
   7775 
   7776 		/* Advance the tx pointer. */
   7777 		txq->txq_free -= txs->txs_ndesc;
   7778 		txq->txq_next = nexttx;
   7779 
   7780 		txq->txq_sfree--;
   7781 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7782 
   7783 		/* Pass the packet to any BPF listeners. */
   7784 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7785 	}
   7786 
   7787 	if (m0 != NULL) {
   7788 		if (!is_transmit)
   7789 			ifp->if_flags |= IFF_OACTIVE;
   7790 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7791 		WM_Q_EVCNT_INCR(txq, descdrop);
   7792 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7793 			__func__));
   7794 		m_freem(m0);
   7795 	}
   7796 
   7797 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7798 		/* No more slots; notify upper layer. */
   7799 		if (!is_transmit)
   7800 			ifp->if_flags |= IFF_OACTIVE;
   7801 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7802 	}
   7803 
   7804 	if (txq->txq_free != ofree) {
   7805 		/* Set a watchdog timer in case the chip flakes out. */
   7806 		txq->txq_lastsent = time_uptime;
   7807 		txq->txq_sending = true;
   7808 	}
   7809 }
   7810 
   7811 /*
   7812  * wm_nq_tx_offload:
   7813  *
   7814  *	Set up TCP/IP checksumming parameters for the
   7815  *	specified packet, for NEWQUEUE devices
   7816  */
   7817 static int
   7818 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7819     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7820 {
   7821 	struct mbuf *m0 = txs->txs_mbuf;
   7822 	uint32_t vl_len, mssidx, cmdc;
   7823 	struct ether_header *eh;
   7824 	int offset, iphl;
   7825 
   7826 	/*
   7827 	 * XXX It would be nice if the mbuf pkthdr had offset
   7828 	 * fields for the protocol headers.
   7829 	 */
   7830 	*cmdlenp = 0;
   7831 	*fieldsp = 0;
   7832 
   7833 	eh = mtod(m0, struct ether_header *);
   7834 	switch (htons(eh->ether_type)) {
   7835 	case ETHERTYPE_IP:
   7836 	case ETHERTYPE_IPV6:
   7837 		offset = ETHER_HDR_LEN;
   7838 		break;
   7839 
   7840 	case ETHERTYPE_VLAN:
   7841 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7842 		break;
   7843 
   7844 	default:
    7845 		/* We don't support this protocol or encapsulation. */
   7846 		*do_csum = false;
   7847 		return 0;
   7848 	}
   7849 	*do_csum = true;
   7850 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7851 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7852 
   7853 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7854 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
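         	/*
         	 * Note: the context descriptor's 32-bit vl_len word packs the
         	 * VLAN tag, the MAC header length and the IP header length
         	 * together; the NQTXC_VLLEN_* shifts and masks used here and
         	 * below define that layout.
         	 */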
   7855 
   7856 	if ((m0->m_pkthdr.csum_flags &
   7857 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7858 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7859 	} else {
   7860 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7861 	}
   7862 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7863 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7864 
   7865 	if (vlan_has_tag(m0)) {
   7866 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7867 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7868 		*cmdlenp |= NQTX_CMD_VLE;
   7869 	}
   7870 
   7871 	mssidx = 0;
   7872 
   7873 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7874 		int hlen = offset + iphl;
   7875 		int tcp_hlen;
   7876 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7877 
   7878 		if (__predict_false(m0->m_len <
   7879 				    (hlen + sizeof(struct tcphdr)))) {
   7880 			/*
   7881 			 * TCP/IP headers are not in the first mbuf; we need
   7882 			 * to do this the slow and painful way. Let's just
   7883 			 * hope this doesn't happen very often.
   7884 			 */
   7885 			struct tcphdr th;
   7886 
   7887 			WM_Q_EVCNT_INCR(txq, tsopain);
   7888 
   7889 			m_copydata(m0, hlen, sizeof(th), &th);
   7890 			if (v4) {
   7891 				struct ip ip;
   7892 
   7893 				m_copydata(m0, offset, sizeof(ip), &ip);
   7894 				ip.ip_len = 0;
   7895 				m_copyback(m0,
   7896 				    offset + offsetof(struct ip, ip_len),
   7897 				    sizeof(ip.ip_len), &ip.ip_len);
   7898 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7899 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7900 			} else {
   7901 				struct ip6_hdr ip6;
   7902 
   7903 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7904 				ip6.ip6_plen = 0;
   7905 				m_copyback(m0,
   7906 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7907 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7908 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7909 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7910 			}
   7911 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7912 			    sizeof(th.th_sum), &th.th_sum);
   7913 
   7914 			tcp_hlen = th.th_off << 2;
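         			/* th_off counts 32-bit words; << 2 gives bytes. */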
   7915 		} else {
   7916 			/*
   7917 			 * TCP/IP headers are in the first mbuf; we can do
   7918 			 * this the easy way.
   7919 			 */
   7920 			struct tcphdr *th;
   7921 
   7922 			if (v4) {
   7923 				struct ip *ip =
   7924 				    (void *)(mtod(m0, char *) + offset);
   7925 				th = (void *)(mtod(m0, char *) + hlen);
   7926 
   7927 				ip->ip_len = 0;
   7928 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7929 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7930 			} else {
   7931 				struct ip6_hdr *ip6 =
   7932 				    (void *)(mtod(m0, char *) + offset);
   7933 				th = (void *)(mtod(m0, char *) + hlen);
   7934 
   7935 				ip6->ip6_plen = 0;
   7936 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7937 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7938 			}
   7939 			tcp_hlen = th->th_off << 2;
   7940 		}
   7941 		hlen += tcp_hlen;
   7942 		*cmdlenp |= NQTX_CMD_TSE;
   7943 
   7944 		if (v4) {
   7945 			WM_Q_EVCNT_INCR(txq, tso);
   7946 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7947 		} else {
   7948 			WM_Q_EVCNT_INCR(txq, tso6);
   7949 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7950 		}
   7951 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7952 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7953 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7954 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7955 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7956 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
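         		/*
         		 * For TSO, mssidx carries the MSS and the TCP header
         		 * length; the hardware uses them to slice the payload
         		 * into segments and replicate the headers for each one.
         		 */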
   7957 	} else {
   7958 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7959 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7960 	}
   7961 
   7962 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7963 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7964 		cmdc |= NQTXC_CMD_IP4;
   7965 	}
   7966 
   7967 	if (m0->m_pkthdr.csum_flags &
   7968 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7969 		WM_Q_EVCNT_INCR(txq, tusum);
   7970 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7971 			cmdc |= NQTXC_CMD_TCP;
   7972 		else
   7973 			cmdc |= NQTXC_CMD_UDP;
   7974 
   7975 		cmdc |= NQTXC_CMD_IP4;
   7976 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7977 	}
   7978 	if (m0->m_pkthdr.csum_flags &
   7979 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7980 		WM_Q_EVCNT_INCR(txq, tusum6);
   7981 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7982 			cmdc |= NQTXC_CMD_TCP;
   7983 		else
   7984 			cmdc |= NQTXC_CMD_UDP;
   7985 
   7986 		cmdc |= NQTXC_CMD_IP6;
   7987 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7988 	}
   7989 
    7990 	/*
    7991 	 * We don't have to write a context descriptor for every packet on
    7992 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    7993 	 * I210 and I211. Writing one context descriptor per Tx queue is
    7994 	 * enough for these controllers.
    7995 	 * Writing a context descriptor for every packet adds overhead,
    7996 	 * but it does not cause problems.
    7997 	 */
   7998 	/* Fill in the context descriptor. */
   7999 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8000 	    htole32(vl_len);
   8001 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8002 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8003 	    htole32(cmdc);
   8004 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8005 	    htole32(mssidx);
   8006 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8007 	DPRINTF(WM_DEBUG_TX,
   8008 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8009 		txq->txq_next, 0, vl_len));
   8010 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8011 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8012 	txs->txs_ndesc++;
   8013 	return 0;
   8014 }
   8015 
   8016 /*
   8017  * wm_nq_start:		[ifnet interface function]
   8018  *
   8019  *	Start packet transmission on the interface for NEWQUEUE devices
   8020  */
   8021 static void
   8022 wm_nq_start(struct ifnet *ifp)
   8023 {
   8024 	struct wm_softc *sc = ifp->if_softc;
   8025 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8026 
   8027 #ifdef WM_MPSAFE
   8028 	KASSERT(if_is_mpsafe(ifp));
   8029 #endif
   8030 	/*
   8031 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8032 	 */
   8033 
   8034 	mutex_enter(txq->txq_lock);
   8035 	if (!txq->txq_stopping)
   8036 		wm_nq_start_locked(ifp);
   8037 	mutex_exit(txq->txq_lock);
   8038 }
   8039 
   8040 static void
   8041 wm_nq_start_locked(struct ifnet *ifp)
   8042 {
   8043 	struct wm_softc *sc = ifp->if_softc;
   8044 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8045 
   8046 	wm_nq_send_common_locked(ifp, txq, false);
   8047 }
   8048 
   8049 static int
   8050 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8051 {
   8052 	int qid;
   8053 	struct wm_softc *sc = ifp->if_softc;
   8054 	struct wm_txqueue *txq;
   8055 
   8056 	qid = wm_select_txqueue(ifp, m);
   8057 	txq = &sc->sc_queue[qid].wmq_txq;
   8058 
   8059 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8060 		m_freem(m);
   8061 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8062 		return ENOBUFS;
   8063 	}
   8064 
   8065 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8066 	ifp->if_obytes += m->m_pkthdr.len;
   8067 	if (m->m_flags & M_MCAST)
   8068 		ifp->if_omcasts++;
   8069 
    8070 	/*
    8071 	 * There are two situations in which this mutex_tryenter() can
    8072 	 * fail at run time:
    8073 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8074 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8075 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8076 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8077 	 * In case (2), the last packet enqueued to txq->txq_interq is
    8078 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    8079 	 * get stuck, either.
    8080 	 */
   8081 	if (mutex_tryenter(txq->txq_lock)) {
   8082 		if (!txq->txq_stopping)
   8083 			wm_nq_transmit_locked(ifp, txq);
   8084 		mutex_exit(txq->txq_lock);
   8085 	}
   8086 
   8087 	return 0;
   8088 }
   8089 
   8090 static void
   8091 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8092 {
   8093 
   8094 	wm_nq_send_common_locked(ifp, txq, true);
   8095 }
   8096 
   8097 static void
   8098 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8099     bool is_transmit)
   8100 {
   8101 	struct wm_softc *sc = ifp->if_softc;
   8102 	struct mbuf *m0;
   8103 	struct wm_txsoft *txs;
   8104 	bus_dmamap_t dmamap;
   8105 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8106 	bool do_csum, sent;
   8107 	bool remap = true;
   8108 
   8109 	KASSERT(mutex_owned(txq->txq_lock));
   8110 
   8111 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8112 		return;
   8113 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8114 		return;
   8115 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8116 		return;
   8117 
   8118 	sent = false;
   8119 
   8120 	/*
   8121 	 * Loop through the send queue, setting up transmit descriptors
   8122 	 * until we drain the queue, or use up all available transmit
   8123 	 * descriptors.
   8124 	 */
   8125 	for (;;) {
   8126 		m0 = NULL;
   8127 
   8128 		/* Get a work queue entry. */
   8129 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
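         			/*
         			 * The pool of free software descriptors is getting
         			 * low; reclaim completed transmit jobs before
         			 * taking on a new one.
         			 */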
   8130 			wm_txeof(txq, UINT_MAX);
   8131 			if (txq->txq_sfree == 0) {
   8132 				DPRINTF(WM_DEBUG_TX,
   8133 				    ("%s: TX: no free job descriptors\n",
   8134 					device_xname(sc->sc_dev)));
   8135 				WM_Q_EVCNT_INCR(txq, txsstall);
   8136 				break;
   8137 			}
   8138 		}
   8139 
   8140 		/* Grab a packet off the queue. */
   8141 		if (is_transmit)
   8142 			m0 = pcq_get(txq->txq_interq);
   8143 		else
   8144 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8145 		if (m0 == NULL)
   8146 			break;
   8147 
   8148 		DPRINTF(WM_DEBUG_TX,
   8149 		    ("%s: TX: have packet to transmit: %p\n",
   8150 		    device_xname(sc->sc_dev), m0));
   8151 
   8152 		txs = &txq->txq_soft[txq->txq_snext];
   8153 		dmamap = txs->txs_dmamap;
   8154 
   8155 		/*
   8156 		 * Load the DMA map.  If this fails, the packet either
   8157 		 * didn't fit in the allotted number of segments, or we
   8158 		 * were short on resources.  For the too-many-segments
   8159 		 * case, we simply report an error and drop the packet,
   8160 		 * since we can't sanely copy a jumbo packet to a single
   8161 		 * buffer.
   8162 		 */
   8163 retry:
   8164 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8165 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8166 		if (__predict_false(error)) {
   8167 			if (error == EFBIG) {
   8168 				if (remap == true) {
   8169 					struct mbuf *m;
   8170 
   8171 					remap = false;
   8172 					m = m_defrag(m0, M_NOWAIT);
   8173 					if (m != NULL) {
   8174 						WM_Q_EVCNT_INCR(txq, defrag);
   8175 						m0 = m;
   8176 						goto retry;
   8177 					}
   8178 				}
   8179 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8180 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8181 				    "DMA segments, dropping...\n",
   8182 				    device_xname(sc->sc_dev));
   8183 				wm_dump_mbuf_chain(sc, m0);
   8184 				m_freem(m0);
   8185 				continue;
   8186 			}
   8187 			/* Short on resources, just stop for now. */
   8188 			DPRINTF(WM_DEBUG_TX,
   8189 			    ("%s: TX: dmamap load failed: %d\n",
   8190 				device_xname(sc->sc_dev), error));
   8191 			break;
   8192 		}
   8193 
   8194 		segs_needed = dmamap->dm_nsegs;
   8195 
   8196 		/*
   8197 		 * Ensure we have enough descriptors free to describe
   8198 		 * the packet. Note, we always reserve one descriptor
   8199 		 * at the end of the ring due to the semantics of the
   8200 		 * TDT register, plus one more in the event we need
   8201 		 * to load offload context.
   8202 		 */
   8203 		if (segs_needed > txq->txq_free - 2) {
   8204 			/*
   8205 			 * Not enough free descriptors to transmit this
   8206 			 * packet.  We haven't committed anything yet,
   8207 			 * so just unload the DMA map, put the packet
    8208 			 * back on the queue, and punt. Notify the upper
   8209 			 * layer that there are no more slots left.
   8210 			 */
   8211 			DPRINTF(WM_DEBUG_TX,
   8212 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8213 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
    8214 				segs_needed, txq->txq_free - 2));
   8215 			if (!is_transmit)
   8216 				ifp->if_flags |= IFF_OACTIVE;
   8217 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8218 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8219 			WM_Q_EVCNT_INCR(txq, txdstall);
   8220 			break;
   8221 		}
   8222 
   8223 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8224 
   8225 		DPRINTF(WM_DEBUG_TX,
   8226 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8227 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8228 
   8229 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8230 
   8231 		/*
   8232 		 * Store a pointer to the packet so that we can free it
   8233 		 * later.
   8234 		 *
    8235 		 * Initially, we consider the number of descriptors the
    8236 		 * packet uses to be the number of DMA segments.  This may
    8237 		 * be incremented by 1 if we do checksum offload (a
    8238 		 * descriptor is used to set the checksum context).
   8239 		 */
   8240 		txs->txs_mbuf = m0;
   8241 		txs->txs_firstdesc = txq->txq_next;
   8242 		txs->txs_ndesc = segs_needed;
   8243 
   8244 		/* Set up offload parameters for this packet. */
   8245 		uint32_t cmdlen, fields, dcmdlen;
   8246 		if (m0->m_pkthdr.csum_flags &
   8247 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8248 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8249 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8250 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8251 			    &do_csum) != 0) {
   8252 				/* Error message already displayed. */
   8253 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8254 				continue;
   8255 			}
   8256 		} else {
   8257 			do_csum = false;
   8258 			cmdlen = 0;
   8259 			fields = 0;
   8260 		}
   8261 
   8262 		/* Sync the DMA map. */
   8263 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8264 		    BUS_DMASYNC_PREWRITE);
   8265 
   8266 		/* Initialize the first transmit descriptor. */
   8267 		nexttx = txq->txq_next;
   8268 		if (!do_csum) {
   8269 			/* Setup a legacy descriptor */
   8270 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8271 			    dmamap->dm_segs[0].ds_addr);
   8272 			txq->txq_descs[nexttx].wtx_cmdlen =
   8273 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8274 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8275 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8276 			if (vlan_has_tag(m0)) {
   8277 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8278 				    htole32(WTX_CMD_VLE);
   8279 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8280 				    htole16(vlan_get_tag(m0));
   8281 			} else
    8282 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8283 
   8284 			dcmdlen = 0;
   8285 		} else {
   8286 			/* Setup an advanced data descriptor */
   8287 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8288 			    htole64(dmamap->dm_segs[0].ds_addr);
   8289 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
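         			/*
         			 * The buffer length occupies the low bits of the
         			 * cmdlen word, so OR-ing it with the command bits
         			 * is only safe while the two don't overlap; the
         			 * KASSERTs here and in the segment loop below
         			 * check that.
         			 */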
   8290 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8291 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8292 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8293 			    htole32(fields);
   8294 			DPRINTF(WM_DEBUG_TX,
   8295 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8296 				device_xname(sc->sc_dev), nexttx,
   8297 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8298 			DPRINTF(WM_DEBUG_TX,
   8299 			    ("\t 0x%08x%08x\n", fields,
   8300 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8301 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8302 		}
   8303 
   8304 		lasttx = nexttx;
   8305 		nexttx = WM_NEXTTX(txq, nexttx);
   8306 		/*
    8307 		 * Fill in the next descriptors. The legacy and advanced
    8308 		 * formats are identical here.
   8309 		 */
   8310 		for (seg = 1; seg < dmamap->dm_nsegs;
   8311 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8312 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8313 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8314 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8315 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8316 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8317 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8318 			lasttx = nexttx;
   8319 
   8320 			DPRINTF(WM_DEBUG_TX,
   8321 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8322 				device_xname(sc->sc_dev), nexttx,
   8323 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8324 				dmamap->dm_segs[seg].ds_len));
   8325 		}
   8326 
   8327 		KASSERT(lasttx != -1);
   8328 
   8329 		/*
   8330 		 * Set up the command byte on the last descriptor of
   8331 		 * the packet. If we're in the interrupt delay window,
   8332 		 * delay the interrupt.
   8333 		 */
   8334 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8335 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
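         		/*
         		 * The KASSERT above documents that the legacy and
         		 * advanced EOP/RS bits occupy the same positions, so
         		 * setting them through the legacy view of the descriptor
         		 * union is safe for both formats.
         		 */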
   8336 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8337 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8338 
   8339 		txs->txs_lastdesc = lasttx;
   8340 
   8341 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8342 		    device_xname(sc->sc_dev),
   8343 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8344 
   8345 		/* Sync the descriptors we're using. */
   8346 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8347 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8348 
   8349 		/* Give the packet to the chip. */
   8350 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8351 		sent = true;
   8352 
   8353 		DPRINTF(WM_DEBUG_TX,
   8354 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8355 
   8356 		DPRINTF(WM_DEBUG_TX,
   8357 		    ("%s: TX: finished transmitting packet, job %d\n",
   8358 			device_xname(sc->sc_dev), txq->txq_snext));
   8359 
   8360 		/* Advance the tx pointer. */
   8361 		txq->txq_free -= txs->txs_ndesc;
   8362 		txq->txq_next = nexttx;
   8363 
   8364 		txq->txq_sfree--;
   8365 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8366 
   8367 		/* Pass the packet to any BPF listeners. */
   8368 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8369 	}
   8370 
   8371 	if (m0 != NULL) {
   8372 		if (!is_transmit)
   8373 			ifp->if_flags |= IFF_OACTIVE;
   8374 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8375 		WM_Q_EVCNT_INCR(txq, descdrop);
   8376 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8377 			__func__));
   8378 		m_freem(m0);
   8379 	}
   8380 
   8381 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8382 		/* No more slots; notify upper layer. */
   8383 		if (!is_transmit)
   8384 			ifp->if_flags |= IFF_OACTIVE;
   8385 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8386 	}
   8387 
   8388 	if (sent) {
   8389 		/* Set a watchdog timer in case the chip flakes out. */
   8390 		txq->txq_lastsent = time_uptime;
   8391 		txq->txq_sending = true;
   8392 	}
   8393 }
   8394 
   8395 static void
   8396 wm_deferred_start_locked(struct wm_txqueue *txq)
   8397 {
   8398 	struct wm_softc *sc = txq->txq_sc;
   8399 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8400 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8401 	int qid = wmq->wmq_id;
   8402 
   8403 	KASSERT(mutex_owned(txq->txq_lock));
   8404 
   8405 	if (txq->txq_stopping) {
   8406 		mutex_exit(txq->txq_lock);
   8407 		return;
   8408 	}
   8409 
   8410 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8411 		/* XXX needed for ALTQ or single-CPU systems */
   8412 		if (qid == 0)
   8413 			wm_nq_start_locked(ifp);
   8414 		wm_nq_transmit_locked(ifp, txq);
   8415 	} else {
    8416 		/* XXX needed for ALTQ or single-CPU systems */
   8417 		if (qid == 0)
   8418 			wm_start_locked(ifp);
   8419 		wm_transmit_locked(ifp, txq);
   8420 	}
   8421 }
   8422 
   8423 /* Interrupt */
   8424 
   8425 /*
   8426  * wm_txeof:
   8427  *
   8428  *	Helper; handle transmit interrupts.
   8429  */
   8430 static bool
   8431 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8432 {
   8433 	struct wm_softc *sc = txq->txq_sc;
   8434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8435 	struct wm_txsoft *txs;
   8436 	int count = 0;
   8437 	int i;
   8438 	uint8_t status;
   8439 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8440 	bool more = false;
   8441 
   8442 	KASSERT(mutex_owned(txq->txq_lock));
   8443 
   8444 	if (txq->txq_stopping)
   8445 		return false;
   8446 
   8447 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8448 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8449 	if (wmq->wmq_id == 0)
   8450 		ifp->if_flags &= ~IFF_OACTIVE;
   8451 
   8452 	/*
   8453 	 * Go through the Tx list and free mbufs for those
   8454 	 * frames which have been transmitted.
   8455 	 */
   8456 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8457 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8458 		if (limit-- == 0) {
   8459 			more = true;
   8460 			DPRINTF(WM_DEBUG_TX,
   8461 			    ("%s: TX: loop limited, job %d is not processed\n",
   8462 				device_xname(sc->sc_dev), i));
   8463 			break;
   8464 		}
   8465 
   8466 		txs = &txq->txq_soft[i];
   8467 
   8468 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8469 			device_xname(sc->sc_dev), i));
   8470 
   8471 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8472 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8473 
   8474 		status =
   8475 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8476 		if ((status & WTX_ST_DD) == 0) {
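         			/*
         			 * The hardware has not yet written back the last
         			 * descriptor of this job; make it readable again
         			 * and stop processing until the next interrupt or
         			 * poll.
         			 */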
   8477 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8478 			    BUS_DMASYNC_PREREAD);
   8479 			break;
   8480 		}
   8481 
   8482 		count++;
   8483 		DPRINTF(WM_DEBUG_TX,
   8484 		    ("%s: TX: job %d done: descs %d..%d\n",
   8485 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8486 		    txs->txs_lastdesc));
   8487 
   8488 		/*
   8489 		 * XXX We should probably be using the statistics
   8490 		 * XXX registers, but I don't know if they exist
   8491 		 * XXX on chips before the i82544.
   8492 		 */
   8493 
   8494 #ifdef WM_EVENT_COUNTERS
   8495 		if (status & WTX_ST_TU)
   8496 			WM_Q_EVCNT_INCR(txq, underrun);
   8497 #endif /* WM_EVENT_COUNTERS */
   8498 
   8499 		/*
    8500 		 * The documents for 82574 and newer say the status field has
    8501 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    8502 		 * Collision) bit (both are reserved). Refer to the "PCIe GbE
    8503 		 * Controller Open Source Software Developer's Manual", the
    8504 		 * 82574 datasheet and newer ones.
    8505 		 *
    8506 		 * XXX The LC bit has been seen set on I218 even on full-duplex
    8507 		 * media, so the bit might have another, undocumented meaning.
   8508 		 */
   8509 
   8510 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8511 		    && ((sc->sc_type < WM_T_82574)
   8512 			|| (sc->sc_type == WM_T_80003))) {
   8513 			ifp->if_oerrors++;
   8514 			if (status & WTX_ST_LC)
   8515 				log(LOG_WARNING, "%s: late collision\n",
   8516 				    device_xname(sc->sc_dev));
   8517 			else if (status & WTX_ST_EC) {
   8518 				ifp->if_collisions +=
   8519 				    TX_COLLISION_THRESHOLD + 1;
   8520 				log(LOG_WARNING, "%s: excessive collisions\n",
   8521 				    device_xname(sc->sc_dev));
   8522 			}
   8523 		} else
   8524 			ifp->if_opackets++;
   8525 
   8526 		txq->txq_packets++;
   8527 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8528 
   8529 		txq->txq_free += txs->txs_ndesc;
   8530 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8531 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8532 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8533 		m_freem(txs->txs_mbuf);
   8534 		txs->txs_mbuf = NULL;
   8535 	}
   8536 
   8537 	/* Update the dirty transmit buffer pointer. */
   8538 	txq->txq_sdirty = i;
   8539 	DPRINTF(WM_DEBUG_TX,
   8540 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8541 
   8542 	/*
   8543 	 * If there are no more pending transmissions, cancel the watchdog
   8544 	 * timer.
   8545 	 */
   8546 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8547 		txq->txq_sending = false;
   8548 
   8549 	return more;
   8550 }
   8551 
   8552 static inline uint32_t
   8553 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8554 {
   8555 	struct wm_softc *sc = rxq->rxq_sc;
   8556 
   8557 	if (sc->sc_type == WM_T_82574)
   8558 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8559 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8560 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8561 	else
   8562 		return rxq->rxq_descs[idx].wrx_status;
   8563 }
   8564 
   8565 static inline uint32_t
   8566 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8567 {
   8568 	struct wm_softc *sc = rxq->rxq_sc;
   8569 
   8570 	if (sc->sc_type == WM_T_82574)
   8571 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8572 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8573 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8574 	else
   8575 		return rxq->rxq_descs[idx].wrx_errors;
   8576 }
   8577 
   8578 static inline uint16_t
   8579 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8580 {
   8581 	struct wm_softc *sc = rxq->rxq_sc;
   8582 
   8583 	if (sc->sc_type == WM_T_82574)
   8584 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8585 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8586 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8587 	else
   8588 		return rxq->rxq_descs[idx].wrx_special;
   8589 }
   8590 
   8591 static inline int
   8592 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8593 {
   8594 	struct wm_softc *sc = rxq->rxq_sc;
   8595 
   8596 	if (sc->sc_type == WM_T_82574)
   8597 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8598 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8599 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8600 	else
   8601 		return rxq->rxq_descs[idx].wrx_len;
   8602 }
   8603 
   8604 #ifdef WM_DEBUG
   8605 static inline uint32_t
   8606 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8607 {
   8608 	struct wm_softc *sc = rxq->rxq_sc;
   8609 
   8610 	if (sc->sc_type == WM_T_82574)
   8611 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8612 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8613 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8614 	else
   8615 		return 0;
   8616 }
   8617 
   8618 static inline uint8_t
   8619 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8620 {
   8621 	struct wm_softc *sc = rxq->rxq_sc;
   8622 
   8623 	if (sc->sc_type == WM_T_82574)
   8624 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8625 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8626 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8627 	else
   8628 		return 0;
   8629 }
   8630 #endif /* WM_DEBUG */
   8631 
   8632 static inline bool
   8633 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8634     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8635 {
   8636 
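         	/*
         	 * Callers pass the equivalent bit for each of the three
         	 * descriptor layouts (legacy, 82574 extended and NEWQUEUE);
         	 * this helper tests whichever one matches the chip in use.
         	 */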
   8637 	if (sc->sc_type == WM_T_82574)
   8638 		return (status & ext_bit) != 0;
   8639 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8640 		return (status & nq_bit) != 0;
   8641 	else
   8642 		return (status & legacy_bit) != 0;
   8643 }
   8644 
   8645 static inline bool
   8646 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8647     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8648 {
   8649 
   8650 	if (sc->sc_type == WM_T_82574)
   8651 		return (error & ext_bit) != 0;
   8652 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8653 		return (error & nq_bit) != 0;
   8654 	else
   8655 		return (error & legacy_bit) != 0;
   8656 }
   8657 
   8658 static inline bool
   8659 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8660 {
   8661 
   8662 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8663 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8664 		return true;
   8665 	else
   8666 		return false;
   8667 }
   8668 
   8669 static inline bool
   8670 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8671 {
   8672 	struct wm_softc *sc = rxq->rxq_sc;
   8673 
   8674 	/* XXX missing error bit for newqueue? */
   8675 	if (wm_rxdesc_is_set_error(sc, errors,
   8676 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8677 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8678 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8679 		NQRXC_ERROR_RXE)) {
   8680 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8681 		    EXTRXC_ERROR_SE, 0))
   8682 			log(LOG_WARNING, "%s: symbol error\n",
   8683 			    device_xname(sc->sc_dev));
   8684 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8685 		    EXTRXC_ERROR_SEQ, 0))
   8686 			log(LOG_WARNING, "%s: receive sequence error\n",
   8687 			    device_xname(sc->sc_dev));
   8688 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8689 		    EXTRXC_ERROR_CE, 0))
   8690 			log(LOG_WARNING, "%s: CRC error\n",
   8691 			    device_xname(sc->sc_dev));
   8692 		return true;
   8693 	}
   8694 
   8695 	return false;
   8696 }
   8697 
   8698 static inline bool
   8699 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8700 {
   8701 	struct wm_softc *sc = rxq->rxq_sc;
   8702 
   8703 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8704 		NQRXC_STATUS_DD)) {
   8705 		/* We have processed all of the receive descriptors. */
   8706 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8707 		return false;
   8708 	}
   8709 
   8710 	return true;
   8711 }
   8712 
   8713 static inline bool
   8714 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8715     uint16_t vlantag, struct mbuf *m)
   8716 {
   8717 
   8718 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8719 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8720 		vlan_set_tag(m, le16toh(vlantag));
   8721 	}
   8722 
   8723 	return true;
   8724 }
   8725 
   8726 static inline void
   8727 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8728     uint32_t errors, struct mbuf *m)
   8729 {
   8730 	struct wm_softc *sc = rxq->rxq_sc;
   8731 
   8732 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8733 		if (wm_rxdesc_is_set_status(sc, status,
   8734 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8735 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8736 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8737 			if (wm_rxdesc_is_set_error(sc, errors,
   8738 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8739 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8740 		}
   8741 		if (wm_rxdesc_is_set_status(sc, status,
   8742 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8743 			/*
   8744 			 * Note: we don't know if this was TCP or UDP,
   8745 			 * so we just set both bits, and expect the
   8746 			 * upper layers to deal.
   8747 			 */
   8748 			WM_Q_EVCNT_INCR(rxq, tusum);
   8749 			m->m_pkthdr.csum_flags |=
   8750 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8751 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8752 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8753 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8754 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8755 		}
   8756 	}
   8757 }
   8758 
   8759 /*
   8760  * wm_rxeof:
   8761  *
   8762  *	Helper; handle receive interrupts.
   8763  */
   8764 static bool
   8765 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8766 {
   8767 	struct wm_softc *sc = rxq->rxq_sc;
   8768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8769 	struct wm_rxsoft *rxs;
   8770 	struct mbuf *m;
   8771 	int i, len;
   8772 	int count = 0;
   8773 	uint32_t status, errors;
   8774 	uint16_t vlantag;
   8775 	bool more = false;
   8776 
   8777 	KASSERT(mutex_owned(rxq->rxq_lock));
   8778 
   8779 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8780 		if (limit-- == 0) {
   8781 			rxq->rxq_ptr = i;
   8782 			more = true;
   8783 			DPRINTF(WM_DEBUG_RX,
   8784 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8785 				device_xname(sc->sc_dev), i));
   8786 			break;
   8787 		}
   8788 
   8789 		rxs = &rxq->rxq_soft[i];
   8790 
   8791 		DPRINTF(WM_DEBUG_RX,
   8792 		    ("%s: RX: checking descriptor %d\n",
   8793 			device_xname(sc->sc_dev), i));
   8794 		wm_cdrxsync(rxq, i,
   8795 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8796 
   8797 		status = wm_rxdesc_get_status(rxq, i);
   8798 		errors = wm_rxdesc_get_errors(rxq, i);
   8799 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8800 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8801 #ifdef WM_DEBUG
   8802 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8803 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8804 #endif
   8805 
   8806 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8807 			/*
    8808 			 * Update the receive pointer while holding rxq_lock,
    8809 			 * keeping it consistent with the counters.
   8810 			 */
   8811 			rxq->rxq_ptr = i;
   8812 			break;
   8813 		}
   8814 
   8815 		count++;
   8816 		if (__predict_false(rxq->rxq_discard)) {
   8817 			DPRINTF(WM_DEBUG_RX,
   8818 			    ("%s: RX: discarding contents of descriptor %d\n",
   8819 				device_xname(sc->sc_dev), i));
   8820 			wm_init_rxdesc(rxq, i);
   8821 			if (wm_rxdesc_is_eop(rxq, status)) {
   8822 				/* Reset our state. */
   8823 				DPRINTF(WM_DEBUG_RX,
   8824 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8825 					device_xname(sc->sc_dev)));
   8826 				rxq->rxq_discard = 0;
   8827 			}
   8828 			continue;
   8829 		}
   8830 
   8831 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8832 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8833 
   8834 		m = rxs->rxs_mbuf;
   8835 
   8836 		/*
   8837 		 * Add a new receive buffer to the ring, unless of
   8838 		 * course the length is zero. Treat the latter as a
   8839 		 * failed mapping.
   8840 		 */
   8841 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8842 			/*
   8843 			 * Failed, throw away what we've done so
   8844 			 * far, and discard the rest of the packet.
   8845 			 */
   8846 			ifp->if_ierrors++;
   8847 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8848 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8849 			wm_init_rxdesc(rxq, i);
   8850 			if (!wm_rxdesc_is_eop(rxq, status))
   8851 				rxq->rxq_discard = 1;
   8852 			if (rxq->rxq_head != NULL)
   8853 				m_freem(rxq->rxq_head);
   8854 			WM_RXCHAIN_RESET(rxq);
   8855 			DPRINTF(WM_DEBUG_RX,
   8856 			    ("%s: RX: Rx buffer allocation failed, "
   8857 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8858 				rxq->rxq_discard ? " (discard)" : ""));
   8859 			continue;
   8860 		}
   8861 
   8862 		m->m_len = len;
   8863 		rxq->rxq_len += len;
   8864 		DPRINTF(WM_DEBUG_RX,
   8865 		    ("%s: RX: buffer at %p len %d\n",
   8866 			device_xname(sc->sc_dev), m->m_data, len));
   8867 
   8868 		/* If this is not the end of the packet, keep looking. */
   8869 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8870 			WM_RXCHAIN_LINK(rxq, m);
   8871 			DPRINTF(WM_DEBUG_RX,
   8872 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8873 				device_xname(sc->sc_dev), rxq->rxq_len));
   8874 			continue;
   8875 		}
   8876 
   8877 		/*
    8878 		 * Okay, we have the entire packet now. The chip is
    8879 		 * configured to include the FCS except on I350, I354 and
    8880 		 * I21[01] (not all chips can be configured to strip it),
    8881 		 * so we need to trim it. We may also need to adjust the
    8882 		 * length of the previous mbuf in the chain if the current
    8883 		 * mbuf is too short.
    8884 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8885 		 * is always set on I350, so we don't trim the FCS there.
   8886 		 */
   8887 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8888 		    && (sc->sc_type != WM_T_I210)
   8889 		    && (sc->sc_type != WM_T_I211)) {
   8890 			if (m->m_len < ETHER_CRC_LEN) {
   8891 				rxq->rxq_tail->m_len
   8892 				    -= (ETHER_CRC_LEN - m->m_len);
   8893 				m->m_len = 0;
   8894 			} else
   8895 				m->m_len -= ETHER_CRC_LEN;
   8896 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8897 		} else
   8898 			len = rxq->rxq_len;
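         		/*
         		 * For example, if the final mbuf held only 2 of the 4
         		 * FCS bytes, the code above zeroes that mbuf and trims
         		 * the remaining 2 bytes from the previous mbuf in the
         		 * chain.
         		 */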
   8899 
   8900 		WM_RXCHAIN_LINK(rxq, m);
   8901 
   8902 		*rxq->rxq_tailp = NULL;
   8903 		m = rxq->rxq_head;
   8904 
   8905 		WM_RXCHAIN_RESET(rxq);
   8906 
   8907 		DPRINTF(WM_DEBUG_RX,
   8908 		    ("%s: RX: have entire packet, len -> %d\n",
   8909 			device_xname(sc->sc_dev), len));
   8910 
   8911 		/* If an error occurred, update stats and drop the packet. */
   8912 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8913 			m_freem(m);
   8914 			continue;
   8915 		}
   8916 
   8917 		/* No errors.  Receive the packet. */
   8918 		m_set_rcvif(m, ifp);
   8919 		m->m_pkthdr.len = len;
   8920 		/*
    8921 		 * TODO:
    8922 		 * The rsshash and rsstype should be saved in this mbuf.
   8923 		 */
   8924 		DPRINTF(WM_DEBUG_RX,
   8925 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8926 			device_xname(sc->sc_dev), rsstype, rsshash));
   8927 
   8928 		/*
   8929 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8930 		 * for us.  Associate the tag with the packet.
   8931 		 */
   8932 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8933 			continue;
   8934 
   8935 		/* Set up checksum info for this packet. */
   8936 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8937 		/*
    8938 		 * Update the receive pointer while holding rxq_lock,
    8939 		 * keeping it consistent with the counters.
   8940 		 */
   8941 		rxq->rxq_ptr = i;
   8942 		rxq->rxq_packets++;
   8943 		rxq->rxq_bytes += len;
   8944 		mutex_exit(rxq->rxq_lock);
   8945 
   8946 		/* Pass it on. */
   8947 		if_percpuq_enqueue(sc->sc_ipq, m);
   8948 
   8949 		mutex_enter(rxq->rxq_lock);
   8950 
   8951 		if (rxq->rxq_stopping)
   8952 			break;
   8953 	}
   8954 
   8955 	DPRINTF(WM_DEBUG_RX,
   8956 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8957 
   8958 	return more;
   8959 }
   8960 
   8961 /*
   8962  * wm_linkintr_gmii:
   8963  *
   8964  *	Helper; handle link interrupts for GMII.
   8965  */
   8966 static void
   8967 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8968 {
   8969 	device_t dev = sc->sc_dev;
   8970 	uint32_t status, reg;
   8971 	bool link;
   8972 	int rv;
   8973 
   8974 	KASSERT(WM_CORE_LOCKED(sc));
   8975 
   8976 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8977 		__func__));
   8978 
   8979 	if ((icr & ICR_LSC) == 0) {
   8980 		if (icr & ICR_RXSEQ)
   8981 			DPRINTF(WM_DEBUG_LINK,
   8982 			    ("%s: LINK Receive sequence error\n",
   8983 				device_xname(dev)));
   8984 		return;
   8985 	}
   8986 
   8987 	/* Link status changed */
   8988 	status = CSR_READ(sc, WMREG_STATUS);
   8989 	link = status & STATUS_LU;
   8990 	if (link) {
   8991 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8992 			device_xname(dev),
   8993 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8994 	} else {
   8995 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8996 			device_xname(dev)));
   8997 	}
   8998 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8999 		wm_gig_downshift_workaround_ich8lan(sc);
   9000 
   9001 	if ((sc->sc_type == WM_T_ICH8)
   9002 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9003 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9004 	}
   9005 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9006 		device_xname(dev)));
   9007 	mii_pollstat(&sc->sc_mii);
   9008 	if (sc->sc_type == WM_T_82543) {
   9009 		int miistatus, active;
   9010 
   9011 		/*
   9012 		 * With 82543, we need to force speed and
   9013 		 * duplex on the MAC equal to what the PHY
   9014 		 * speed and duplex configuration is.
   9015 		 */
   9016 		miistatus = sc->sc_mii.mii_media_status;
   9017 
   9018 		if (miistatus & IFM_ACTIVE) {
   9019 			active = sc->sc_mii.mii_media_active;
   9020 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9021 			switch (IFM_SUBTYPE(active)) {
   9022 			case IFM_10_T:
   9023 				sc->sc_ctrl |= CTRL_SPEED_10;
   9024 				break;
   9025 			case IFM_100_TX:
   9026 				sc->sc_ctrl |= CTRL_SPEED_100;
   9027 				break;
   9028 			case IFM_1000_T:
   9029 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9030 				break;
   9031 			default:
   9032 				/*
   9033 				 * Fiber?
    9034 				 * Should not enter here.
   9035 				 */
   9036 				printf("unknown media (%x)\n", active);
   9037 				break;
   9038 			}
   9039 			if (active & IFM_FDX)
   9040 				sc->sc_ctrl |= CTRL_FD;
   9041 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9042 		}
   9043 	} else if (sc->sc_type == WM_T_PCH) {
   9044 		wm_k1_gig_workaround_hv(sc,
   9045 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9046 	}
   9047 
   9048 	/*
   9049 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9050 	 * aggressive resulting in many collisions. To avoid this, increase
   9051 	 * the IPG and reduce Rx latency in the PHY.
   9052 	 */
   9053 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9054 	    && link) {
   9055 		uint32_t tipg_reg;
   9056 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9057 		bool fdx;
   9058 		uint16_t emi_addr, emi_val;
   9059 
   9060 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9061 		tipg_reg &= ~TIPG_IPGT_MASK;
   9062 		fdx = status & STATUS_FD;
   9063 
   9064 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9065 			tipg_reg |= 0xff;
   9066 			/* Reduce Rx latency in analog PHY */
   9067 			emi_val = 0;
   9068 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9069 		    fdx && speed != STATUS_SPEED_1000) {
   9070 			tipg_reg |= 0xc;
   9071 			emi_val = 1;
   9072 		} else {
   9073 			/* Roll back the default values */
   9074 			tipg_reg |= 0x08;
   9075 			emi_val = 1;
   9076 		}
   9077 
   9078 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9079 
   9080 		rv = sc->phy.acquire(sc);
   9081 		if (rv)
   9082 			return;
   9083 
   9084 		if (sc->sc_type == WM_T_PCH2)
   9085 			emi_addr = I82579_RX_CONFIG;
   9086 		else
   9087 			emi_addr = I217_RX_CONFIG;
   9088 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9089 
   9090 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9091 			uint16_t phy_reg;
   9092 
   9093 			sc->phy.readreg_locked(dev, 2,
   9094 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9095 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9096 			if (speed == STATUS_SPEED_100
   9097 			    || speed == STATUS_SPEED_10)
   9098 				phy_reg |= 0x3e8;
   9099 			else
   9100 				phy_reg |= 0xfa;
   9101 			sc->phy.writereg_locked(dev, 2,
   9102 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9103 
   9104 			if (speed == STATUS_SPEED_1000) {
   9105 				sc->phy.readreg_locked(dev, 2,
   9106 				    HV_PM_CTRL, &phy_reg);
   9107 
   9108 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9109 
   9110 				sc->phy.writereg_locked(dev, 2,
   9111 				    HV_PM_CTRL, phy_reg);
   9112 			}
   9113 		}
   9114 		sc->phy.release(sc);
   9115 
   9116 		if (rv)
   9117 			return;
   9118 
   9119 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9120 			uint16_t data, ptr_gap;
   9121 
   9122 			if (speed == STATUS_SPEED_1000) {
   9123 				rv = sc->phy.acquire(sc);
   9124 				if (rv)
   9125 					return;
   9126 
   9127 				rv = sc->phy.readreg_locked(dev, 2,
   9128 				    I219_UNKNOWN1, &data);
   9129 				if (rv) {
   9130 					sc->phy.release(sc);
   9131 					return;
   9132 				}
   9133 
   9134 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9135 				if (ptr_gap < 0x18) {
   9136 					data &= ~(0x3ff << 2);
   9137 					data |= (0x18 << 2);
   9138 					rv = sc->phy.writereg_locked(dev,
   9139 					    2, I219_UNKNOWN1, data);
   9140 				}
   9141 				sc->phy.release(sc);
   9142 				if (rv)
   9143 					return;
   9144 			} else {
   9145 				rv = sc->phy.acquire(sc);
   9146 				if (rv)
   9147 					return;
   9148 
   9149 				rv = sc->phy.writereg_locked(dev, 2,
   9150 				    I219_UNKNOWN1, 0xc023);
   9151 				sc->phy.release(sc);
   9152 				if (rv)
   9153 					return;
   9154 
   9156 		}
   9157 	}
   9158 
   9159 	/*
   9160 	 * I217 Packet Loss issue:
    9161 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9162 	 * on power up.
    9163 	 * Set the Beacon Duration for I217 to 8 usec.
   9164 	 */
   9165 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9166 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9167 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9168 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9169 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9170 	}
   9171 
   9172 	/* Work-around I218 hang issue */
   9173 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9174 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9175 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9176 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9177 		wm_k1_workaround_lpt_lp(sc, link);
   9178 
   9179 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9180 		/*
   9181 		 * Set platform power management values for Latency
   9182 		 * Tolerance Reporting (LTR)
   9183 		 */
   9184 		wm_platform_pm_pch_lpt(sc,
   9185 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9186 	}
   9187 
   9188 	/* Clear link partner's EEE ability */
   9189 	sc->eee_lp_ability = 0;
   9190 
   9191 	/* FEXTNVM6 K1-off workaround */
   9192 	if (sc->sc_type == WM_T_PCH_SPT) {
   9193 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9194 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9195 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9196 		else
   9197 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9198 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9199 	}
   9200 
   9201 	if (!link)
   9202 		return;
   9203 
   9204 	switch (sc->sc_type) {
   9205 	case WM_T_PCH2:
   9206 		wm_k1_workaround_lv(sc);
   9207 		/* FALLTHROUGH */
   9208 	case WM_T_PCH:
   9209 		if (sc->sc_phytype == WMPHY_82578)
   9210 			wm_link_stall_workaround_hv(sc);
   9211 		break;
   9212 	default:
   9213 		break;
   9214 	}
   9215 
   9216 	/* Enable/Disable EEE after link up */
   9217 	if (sc->sc_phytype > WMPHY_82579)
   9218 		wm_set_eee_pchlan(sc);
   9219 }
   9220 
   9221 /*
   9222  * wm_linkintr_tbi:
   9223  *
   9224  *	Helper; handle link interrupts for TBI mode.
   9225  */
   9226 static void
   9227 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9228 {
   9229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9230 	uint32_t status;
   9231 
   9232 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9233 		__func__));
   9234 
   9235 	status = CSR_READ(sc, WMREG_STATUS);
   9236 	if (icr & ICR_LSC) {
   9237 		wm_check_for_link(sc);
   9238 		if (status & STATUS_LU) {
   9239 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9240 				device_xname(sc->sc_dev),
   9241 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9242 			/*
   9243 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9244 			 * so we should update sc->sc_ctrl
   9245 			 */
   9246 
   9247 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9248 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9249 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9250 			if (status & STATUS_FD)
   9251 				sc->sc_tctl |=
   9252 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9253 			else
   9254 				sc->sc_tctl |=
   9255 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9256 			if (sc->sc_ctrl & CTRL_TFCE)
   9257 				sc->sc_fcrtl |= FCRTL_XONE;
   9258 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9259 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9260 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9261 			sc->sc_tbi_linkup = 1;
   9262 			if_link_state_change(ifp, LINK_STATE_UP);
   9263 		} else {
   9264 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9265 				device_xname(sc->sc_dev)));
   9266 			sc->sc_tbi_linkup = 0;
   9267 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9268 		}
   9269 		/* Update LED */
   9270 		wm_tbi_serdes_set_linkled(sc);
   9271 	} else if (icr & ICR_RXSEQ)
   9272 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9273 			device_xname(sc->sc_dev)));
   9274 }
   9275 
   9276 /*
   9277  * wm_linkintr_serdes:
   9278  *
    9279  *	Helper; handle link interrupts for SERDES mode.
   9280  */
   9281 static void
   9282 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9283 {
   9284 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9285 	struct mii_data *mii = &sc->sc_mii;
   9286 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9287 	uint32_t pcs_adv, pcs_lpab, reg;
   9288 
   9289 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9290 		__func__));
   9291 
   9292 	if (icr & ICR_LSC) {
   9293 		/* Check PCS */
   9294 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9295 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9296 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9297 				device_xname(sc->sc_dev)));
   9298 			mii->mii_media_status |= IFM_ACTIVE;
   9299 			sc->sc_tbi_linkup = 1;
   9300 			if_link_state_change(ifp, LINK_STATE_UP);
   9301 		} else {
   9302 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9303 				device_xname(sc->sc_dev)));
   9304 			mii->mii_media_status |= IFM_NONE;
   9305 			sc->sc_tbi_linkup = 0;
   9306 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9307 			wm_tbi_serdes_set_linkled(sc);
   9308 			return;
   9309 		}
   9310 		mii->mii_media_active |= IFM_1000_SX;
   9311 		if ((reg & PCS_LSTS_FDX) != 0)
   9312 			mii->mii_media_active |= IFM_FDX;
   9313 		else
   9314 			mii->mii_media_active |= IFM_HDX;
   9315 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9316 			/* Check flow */
   9317 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9318 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9319 				DPRINTF(WM_DEBUG_LINK,
   9320 				    ("XXX LINKOK but not ACOMP\n"));
   9321 				return;
   9322 			}
   9323 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9324 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9325 			DPRINTF(WM_DEBUG_LINK,
   9326 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
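         			/*
         			 * Resolve flow control as in IEEE 802.3 annex 28B:
         			 * symmetric pause on both sides enables TX and RX
         			 * pause; the asymmetric combinations below enable
         			 * only one direction.
         			 */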
   9327 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9328 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9329 				mii->mii_media_active |= IFM_FLOW
   9330 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9331 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9332 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9333 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9334 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9335 				mii->mii_media_active |= IFM_FLOW
   9336 				    | IFM_ETH_TXPAUSE;
   9337 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9338 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9339 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9340 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9341 				mii->mii_media_active |= IFM_FLOW
   9342 				    | IFM_ETH_RXPAUSE;
   9343 		}
   9344 		/* Update LED */
   9345 		wm_tbi_serdes_set_linkled(sc);
   9346 	} else
   9347 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9348 		    device_xname(sc->sc_dev)));
   9349 }
   9350 
   9351 /*
   9352  * wm_linkintr:
   9353  *
   9354  *	Helper; handle link interrupts.
   9355  */
   9356 static void
   9357 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9358 {
   9359 
   9360 	KASSERT(WM_CORE_LOCKED(sc));
   9361 
   9362 	if (sc->sc_flags & WM_F_HAS_MII)
   9363 		wm_linkintr_gmii(sc, icr);
   9364 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9365 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9366 		wm_linkintr_serdes(sc, icr);
   9367 	else
   9368 		wm_linkintr_tbi(sc, icr);
   9369 }
   9370 
   9371 /*
   9372  * wm_intr_legacy:
   9373  *
   9374  *	Interrupt service routine for INTx and MSI.
   9375  */
   9376 static int
   9377 wm_intr_legacy(void *arg)
   9378 {
   9379 	struct wm_softc *sc = arg;
   9380 	struct wm_queue *wmq = &sc->sc_queue[0];
   9381 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9382 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9383 	uint32_t icr, rndval = 0;
   9384 	int handled = 0;
   9385 
   9386 	while (1 /* CONSTCOND */) {
   9387 		icr = CSR_READ(sc, WMREG_ICR);
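         		/* Reading ICR acknowledges (clears) the asserted causes. */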
   9388 		if ((icr & sc->sc_icr) == 0)
   9389 			break;
   9390 		if (handled == 0)
   9391 			DPRINTF(WM_DEBUG_TX,
    9392 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9393 		if (rndval == 0)
   9394 			rndval = icr;
   9395 
   9396 		mutex_enter(rxq->rxq_lock);
   9397 
   9398 		if (rxq->rxq_stopping) {
   9399 			mutex_exit(rxq->rxq_lock);
   9400 			break;
   9401 		}
   9402 
   9403 		handled = 1;
   9404 
   9405 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9406 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9407 			DPRINTF(WM_DEBUG_RX,
   9408 			    ("%s: RX: got Rx intr 0x%08x\n",
   9409 				device_xname(sc->sc_dev),
   9410 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9411 			WM_Q_EVCNT_INCR(rxq, intr);
   9412 		}
   9413 #endif
   9414 		/*
   9415 		 * wm_rxeof() does *not* call upper layer functions directly,
    9416 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9417 		 * So, we can call wm_rxeof() in interrupt context.
   9418 		 */
   9419 		wm_rxeof(rxq, UINT_MAX);
   9420 		/* Fill lower bits with RX index. See below for the upper. */
   9421 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9422 
   9423 		mutex_exit(rxq->rxq_lock);
   9424 		mutex_enter(txq->txq_lock);
   9425 
   9426 		if (txq->txq_stopping) {
   9427 			mutex_exit(txq->txq_lock);
   9428 			break;
   9429 		}
   9430 
   9431 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9432 		if (icr & ICR_TXDW) {
   9433 			DPRINTF(WM_DEBUG_TX,
   9434 			    ("%s: TX: got TXDW interrupt\n",
   9435 				device_xname(sc->sc_dev)));
   9436 			WM_Q_EVCNT_INCR(txq, txdw);
   9437 		}
   9438 #endif
   9439 		wm_txeof(txq, UINT_MAX);
   9440 		/* Fill upper bits with TX index. See above for the lower. */
    9441 		rndval |= txq->txq_next * WM_NRXDESC;
   9442 
   9443 		mutex_exit(txq->txq_lock);
   9444 		WM_CORE_LOCK(sc);
   9445 
   9446 		if (sc->sc_core_stopping) {
   9447 			WM_CORE_UNLOCK(sc);
   9448 			break;
   9449 		}
   9450 
   9451 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9452 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9453 			wm_linkintr(sc, icr);
   9454 		}
   9455 
   9456 		WM_CORE_UNLOCK(sc);
   9457 
   9458 		if (icr & ICR_RXO) {
   9459 #if defined(WM_DEBUG)
   9460 			log(LOG_WARNING, "%s: Receive overrun\n",
   9461 			    device_xname(sc->sc_dev));
   9462 #endif /* defined(WM_DEBUG) */
   9463 		}
   9464 	}
   9465 
   9466 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9467 
   9468 	if (handled) {
   9469 		/* Try to get more packets going. */
   9470 		softint_schedule(wmq->wmq_si);
   9471 	}
   9472 
   9473 	return handled;
   9474 }
   9475 
   9476 static inline void
   9477 wm_txrxintr_disable(struct wm_queue *wmq)
   9478 {
   9479 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9480 
   9481 	if (sc->sc_type == WM_T_82574)
   9482 		CSR_WRITE(sc, WMREG_IMC,
   9483 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9484 	else if (sc->sc_type == WM_T_82575)
   9485 		CSR_WRITE(sc, WMREG_EIMC,
   9486 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9487 	else
   9488 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9489 }
   9490 
   9491 static inline void
   9492 wm_txrxintr_enable(struct wm_queue *wmq)
   9493 {
   9494 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9495 
   9496 	wm_itrs_calculate(sc, wmq);
   9497 
   9498 	/*
    9499 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9500 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
    9501 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9502 	 * while each wm_handle_queue(wmq) is running.
   9503 	 */
   9504 	if (sc->sc_type == WM_T_82574)
   9505 		CSR_WRITE(sc, WMREG_IMS,
   9506 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9507 	else if (sc->sc_type == WM_T_82575)
   9508 		CSR_WRITE(sc, WMREG_EIMS,
   9509 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9510 	else
   9511 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9512 }
   9513 
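/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for Tx/Rx for MSI-X. The queue's
 *	interrupts stay masked until the work completes, either here or
 *	in the wm_handle_queue() softint scheduled below.
 */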
   9514 static int
   9515 wm_txrxintr_msix(void *arg)
   9516 {
   9517 	struct wm_queue *wmq = arg;
   9518 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9519 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9520 	struct wm_softc *sc = txq->txq_sc;
   9521 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9522 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9523 	uint32_t rndval = 0;
   9524 	bool txmore;
   9525 	bool rxmore;
   9526 
   9527 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9528 
   9529 	DPRINTF(WM_DEBUG_TX,
   9530 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9531 
   9532 	wm_txrxintr_disable(wmq);
   9533 
   9534 	mutex_enter(txq->txq_lock);
   9535 
   9536 	if (txq->txq_stopping) {
   9537 		mutex_exit(txq->txq_lock);
   9538 		return 0;
   9539 	}
   9540 
   9541 	WM_Q_EVCNT_INCR(txq, txdw);
   9542 	txmore = wm_txeof(txq, txlimit);
   9543 	/* Fill upper bits with TX index. See below for the lower. */
   9544 	rndval = txq->txq_next * WM_NRXDESC;
    9545 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9546 	mutex_exit(txq->txq_lock);
   9547 
   9548 	DPRINTF(WM_DEBUG_RX,
   9549 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9550 	mutex_enter(rxq->rxq_lock);
   9551 
   9552 	if (rxq->rxq_stopping) {
   9553 		mutex_exit(rxq->rxq_lock);
   9554 		return 0;
   9555 	}
   9556 
   9557 	WM_Q_EVCNT_INCR(rxq, intr);
   9558 	rxmore = wm_rxeof(rxq, rxlimit);
   9559 
   9560 	/* Fill lower bits with RX index. See above for the upper. */
   9561 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9562 	mutex_exit(rxq->rxq_lock);
   9563 
   9564 	wm_itrs_writereg(sc, wmq);
   9565 
   9566 	/*
    9567 	 * This function is called in hardware interrupt context and is
    9568 	 * per-CPU, so it's not required to take a lock.
   9569 	 */
   9570 	if (rndval != 0)
   9571 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9572 
   9573 	if (txmore || rxmore)
   9574 		softint_schedule(wmq->wmq_si);
   9575 	else
   9576 		wm_txrxintr_enable(wmq);
   9577 
   9578 	return 1;
   9579 }
   9580 
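/*
 * wm_handle_queue:
 *
 *	Softint handler which continues the Tx/Rx processing deferred by
 *	wm_intr_legacy() or wm_txrxintr_msix() when the interrupt-time
 *	process limits were reached.
 */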
   9581 static void
   9582 wm_handle_queue(void *arg)
   9583 {
   9584 	struct wm_queue *wmq = arg;
   9585 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9586 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9587 	struct wm_softc *sc = txq->txq_sc;
   9588 	u_int txlimit = sc->sc_tx_process_limit;
   9589 	u_int rxlimit = sc->sc_rx_process_limit;
   9590 	bool txmore;
   9591 	bool rxmore;
   9592 
   9593 	mutex_enter(txq->txq_lock);
   9594 	if (txq->txq_stopping) {
   9595 		mutex_exit(txq->txq_lock);
   9596 		return;
   9597 	}
   9598 	txmore = wm_txeof(txq, txlimit);
   9599 	wm_deferred_start_locked(txq);
   9600 	mutex_exit(txq->txq_lock);
   9601 
   9602 	mutex_enter(rxq->rxq_lock);
   9603 	if (rxq->rxq_stopping) {
   9604 		mutex_exit(rxq->rxq_lock);
   9605 		return;
   9606 	}
   9607 	WM_Q_EVCNT_INCR(rxq, defer);
   9608 	rxmore = wm_rxeof(rxq, rxlimit);
   9609 	mutex_exit(rxq->rxq_lock);
   9610 
   9611 	if (txmore || rxmore)
   9612 		softint_schedule(wmq->wmq_si);
   9613 	else
   9614 		wm_txrxintr_enable(wmq);
   9615 }
   9616 
   9617 /*
   9618  * wm_linkintr_msix:
   9619  *
   9620  *	Interrupt service routine for link status change for MSI-X.
   9621  */
   9622 static int
   9623 wm_linkintr_msix(void *arg)
   9624 {
   9625 	struct wm_softc *sc = arg;
   9626 	uint32_t reg;
    9627 	bool has_rxo = false;
   9628 
   9629 	DPRINTF(WM_DEBUG_LINK,
   9630 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9631 
   9632 	reg = CSR_READ(sc, WMREG_ICR);
   9633 	WM_CORE_LOCK(sc);
   9634 	if (sc->sc_core_stopping)
   9635 		goto out;
   9636 
   9637 	if ((reg & ICR_LSC) != 0) {
   9638 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9639 		wm_linkintr(sc, ICR_LSC);
   9640 	}
   9641 
   9642 	/*
   9643 	 * XXX 82574 MSI-X mode workaround
   9644 	 *
    9645 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    9646 	 * MSI-X vector but triggers neither the ICR_RXQ(0) nor the
    9647 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9648 	 * interrupts by writing WMREG_ICS to get receive packets processed.
   9649 	 */
   9650 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9651 #if defined(WM_DEBUG)
   9652 		log(LOG_WARNING, "%s: Receive overrun\n",
   9653 		    device_xname(sc->sc_dev));
   9654 #endif /* defined(WM_DEBUG) */
   9655 
   9656 		has_rxo = true;
   9657 		/*
    9658 		 * The RXO interrupt arrives at a very high rate when receive
    9659 		 * traffic is heavy, so we use polling mode for ICR_OTHER, as
    9660 		 * is done for the Tx/Rx interrupts. ICR_OTHER will be
    9661 		 * re-enabled at the end of wm_txrxintr_msix(), which is kicked
    9662 		 * by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9663 		 */
   9664 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9665 
   9666 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9667 	}
    9668 
   9671 out:
   9672 	WM_CORE_UNLOCK(sc);
   9673 
   9674 	if (sc->sc_type == WM_T_82574) {
   9675 		if (!has_rxo)
   9676 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9677 		else
   9678 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9679 	} else if (sc->sc_type == WM_T_82575)
   9680 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9681 	else
   9682 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9683 
   9684 	return 1;
   9685 }
   9686 
   9687 /*
   9688  * Media related.
   9689  * GMII, SGMII, TBI (and SERDES)
   9690  */
   9691 
   9692 /* Common */
   9693 
   9694 /*
   9695  * wm_tbi_serdes_set_linkled:
   9696  *
   9697  *	Update the link LED on TBI and SERDES devices.
   9698  */
   9699 static void
   9700 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9701 {
   9702 
   9703 	if (sc->sc_tbi_linkup)
   9704 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9705 	else
   9706 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9707 
   9708 	/* 82540 or newer devices are active low */
   9709 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9710 
   9711 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9712 }
   9713 
   9714 /* GMII related */
   9715 
   9716 /*
   9717  * wm_gmii_reset:
   9718  *
   9719  *	Reset the PHY.
   9720  */
   9721 static void
   9722 wm_gmii_reset(struct wm_softc *sc)
   9723 {
   9724 	uint32_t reg;
   9725 	int rv;
   9726 
   9727 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9728 		device_xname(sc->sc_dev), __func__));
   9729 
   9730 	rv = sc->phy.acquire(sc);
   9731 	if (rv != 0) {
   9732 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9733 		    __func__);
   9734 		return;
   9735 	}
   9736 
   9737 	switch (sc->sc_type) {
   9738 	case WM_T_82542_2_0:
   9739 	case WM_T_82542_2_1:
   9740 		/* null */
   9741 		break;
   9742 	case WM_T_82543:
   9743 		/*
   9744 		 * With 82543, we need to force speed and duplex on the MAC
   9745 		 * equal to what the PHY speed and duplex configuration is.
   9746 		 * In addition, we need to perform a hardware reset on the PHY
   9747 		 * to take it out of reset.
   9748 		 */
   9749 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9750 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9751 
   9752 		/* The PHY reset pin is active-low. */
   9753 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9754 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9755 		    CTRL_EXT_SWDPIN(4));
   9756 		reg |= CTRL_EXT_SWDPIO(4);
   9757 
   9758 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9759 		CSR_WRITE_FLUSH(sc);
   9760 		delay(10*1000);
   9761 
   9762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9763 		CSR_WRITE_FLUSH(sc);
   9764 		delay(150);
   9765 #if 0
   9766 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9767 #endif
   9768 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9769 		break;
   9770 	case WM_T_82544:	/* Reset 10000us */
   9771 	case WM_T_82540:
   9772 	case WM_T_82545:
   9773 	case WM_T_82545_3:
   9774 	case WM_T_82546:
   9775 	case WM_T_82546_3:
   9776 	case WM_T_82541:
   9777 	case WM_T_82541_2:
   9778 	case WM_T_82547:
   9779 	case WM_T_82547_2:
   9780 	case WM_T_82571:	/* Reset 100us */
   9781 	case WM_T_82572:
   9782 	case WM_T_82573:
   9783 	case WM_T_82574:
   9784 	case WM_T_82575:
   9785 	case WM_T_82576:
   9786 	case WM_T_82580:
   9787 	case WM_T_I350:
   9788 	case WM_T_I354:
   9789 	case WM_T_I210:
   9790 	case WM_T_I211:
   9791 	case WM_T_82583:
   9792 	case WM_T_80003:
   9793 		/* Generic reset */
   9794 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9795 		CSR_WRITE_FLUSH(sc);
   9796 		delay(20000);
   9797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9798 		CSR_WRITE_FLUSH(sc);
   9799 		delay(20000);
   9800 
   9801 		if ((sc->sc_type == WM_T_82541)
   9802 		    || (sc->sc_type == WM_T_82541_2)
   9803 		    || (sc->sc_type == WM_T_82547)
   9804 		    || (sc->sc_type == WM_T_82547_2)) {
    9805 			/* Workarounds for IGP are done in igp_reset() */
   9806 			/* XXX add code to set LED after phy reset */
   9807 		}
   9808 		break;
   9809 	case WM_T_ICH8:
   9810 	case WM_T_ICH9:
   9811 	case WM_T_ICH10:
   9812 	case WM_T_PCH:
   9813 	case WM_T_PCH2:
   9814 	case WM_T_PCH_LPT:
   9815 	case WM_T_PCH_SPT:
   9816 	case WM_T_PCH_CNP:
   9817 		/* Generic reset */
   9818 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9819 		CSR_WRITE_FLUSH(sc);
   9820 		delay(100);
   9821 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9822 		CSR_WRITE_FLUSH(sc);
   9823 		delay(150);
   9824 		break;
   9825 	default:
   9826 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9827 		    __func__);
   9828 		break;
   9829 	}
   9830 
   9831 	sc->phy.release(sc);
   9832 
   9833 	/* get_cfg_done */
   9834 	wm_get_cfg_done(sc);
   9835 
   9836 	/* Extra setup */
   9837 	switch (sc->sc_type) {
   9838 	case WM_T_82542_2_0:
   9839 	case WM_T_82542_2_1:
   9840 	case WM_T_82543:
   9841 	case WM_T_82544:
   9842 	case WM_T_82540:
   9843 	case WM_T_82545:
   9844 	case WM_T_82545_3:
   9845 	case WM_T_82546:
   9846 	case WM_T_82546_3:
   9847 	case WM_T_82541_2:
   9848 	case WM_T_82547_2:
   9849 	case WM_T_82571:
   9850 	case WM_T_82572:
   9851 	case WM_T_82573:
   9852 	case WM_T_82574:
   9853 	case WM_T_82583:
   9854 	case WM_T_82575:
   9855 	case WM_T_82576:
   9856 	case WM_T_82580:
   9857 	case WM_T_I350:
   9858 	case WM_T_I354:
   9859 	case WM_T_I210:
   9860 	case WM_T_I211:
   9861 	case WM_T_80003:
   9862 		/* Null */
   9863 		break;
   9864 	case WM_T_82541:
   9865 	case WM_T_82547:
   9866 		/* XXX Configure actively LED after PHY reset */
   9867 		break;
   9868 	case WM_T_ICH8:
   9869 	case WM_T_ICH9:
   9870 	case WM_T_ICH10:
   9871 	case WM_T_PCH:
   9872 	case WM_T_PCH2:
   9873 	case WM_T_PCH_LPT:
   9874 	case WM_T_PCH_SPT:
   9875 	case WM_T_PCH_CNP:
   9876 		wm_phy_post_reset(sc);
   9877 		break;
   9878 	default:
   9879 		panic("%s: unknown type\n", __func__);
   9880 		break;
   9881 	}
   9882 }
   9883 
   9884 /*
   9885  * Setup sc_phytype and mii_{read|write}reg.
   9886  *
    9887  *  To identify the PHY type, the correct read/write functions must
    9888  * be selected; to select them, the PCI ID or MAC type is consulted
    9889  * without accessing any PHY register.
    9890  *
    9891  *  On the first call of this function, the PHY ID is not known yet,
    9892  * so check the PCI ID or MAC type. The list of PCI IDs may not be
    9893  * perfect, so the result might be incorrect.
    9894  *
    9895  *  On the second call, the PHY OUI and model are used. This might
    9896  * still not be perfect because some entries are missing from the
    9897  * comparison, but it should be better than the first call.
    9898  *
    9899  *  If the newly detected result differs from the previous
    9900  * assumption, a diagnostic message is printed.
   9901  */
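/*
 * An illustrative call sequence (a sketch inferred from the branches
 * below and from wm_gmii_mediainit(), not a verbatim quote of the
 * attach path): the first call is made while mii->mii_readreg is still
 * NULL, so only the PCI ID and MAC type are consulted; once
 * mii_attach() finds a PHY, the second call refines the result:
 *
 *	struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
 *
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);
 */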
   9902 static void
   9903 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9904     uint16_t phy_model)
   9905 {
   9906 	device_t dev = sc->sc_dev;
   9907 	struct mii_data *mii = &sc->sc_mii;
   9908 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9909 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9910 	mii_readreg_t new_readreg;
   9911 	mii_writereg_t new_writereg;
   9912 
   9913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9914 		device_xname(sc->sc_dev), __func__));
   9915 
   9916 	if (mii->mii_readreg == NULL) {
   9917 		/*
   9918 		 *  This is the first call of this function. For ICH and PCH
   9919 		 * variants, it's difficult to determine the PHY access method
   9920 		 * by sc_type, so use the PCI product ID for some devices.
   9921 		 */
   9922 
   9923 		switch (sc->sc_pcidevid) {
   9924 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9925 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9926 			/* 82577 */
   9927 			new_phytype = WMPHY_82577;
   9928 			break;
   9929 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9930 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9931 			/* 82578 */
   9932 			new_phytype = WMPHY_82578;
   9933 			break;
   9934 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9935 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9936 			/* 82579 */
   9937 			new_phytype = WMPHY_82579;
   9938 			break;
   9939 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9940 		case PCI_PRODUCT_INTEL_82801I_BM:
   9941 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9942 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9943 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9944 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9945 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9946 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9947 			/* ICH8, 9, 10 with 82567 */
   9948 			new_phytype = WMPHY_BM;
   9949 			break;
   9950 		default:
   9951 			break;
   9952 		}
   9953 	} else {
   9954 		/* It's not the first call. Use PHY OUI and model */
   9955 		switch (phy_oui) {
   9956 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9957 			switch (phy_model) {
   9958 			case 0x0004: /* XXX */
   9959 				new_phytype = WMPHY_82578;
   9960 				break;
   9961 			default:
   9962 				break;
   9963 			}
   9964 			break;
   9965 		case MII_OUI_xxMARVELL:
   9966 			switch (phy_model) {
   9967 			case MII_MODEL_xxMARVELL_I210:
   9968 				new_phytype = WMPHY_I210;
   9969 				break;
   9970 			case MII_MODEL_xxMARVELL_E1011:
   9971 			case MII_MODEL_xxMARVELL_E1000_3:
   9972 			case MII_MODEL_xxMARVELL_E1000_5:
   9973 			case MII_MODEL_xxMARVELL_E1112:
   9974 				new_phytype = WMPHY_M88;
   9975 				break;
   9976 			case MII_MODEL_xxMARVELL_E1149:
   9977 				new_phytype = WMPHY_BM;
   9978 				break;
   9979 			case MII_MODEL_xxMARVELL_E1111:
   9980 			case MII_MODEL_xxMARVELL_I347:
   9981 			case MII_MODEL_xxMARVELL_E1512:
   9982 			case MII_MODEL_xxMARVELL_E1340M:
   9983 			case MII_MODEL_xxMARVELL_E1543:
   9984 				new_phytype = WMPHY_M88;
   9985 				break;
   9986 			case MII_MODEL_xxMARVELL_I82563:
   9987 				new_phytype = WMPHY_GG82563;
   9988 				break;
   9989 			default:
   9990 				break;
   9991 			}
   9992 			break;
   9993 		case MII_OUI_INTEL:
   9994 			switch (phy_model) {
   9995 			case MII_MODEL_INTEL_I82577:
   9996 				new_phytype = WMPHY_82577;
   9997 				break;
   9998 			case MII_MODEL_INTEL_I82579:
   9999 				new_phytype = WMPHY_82579;
   10000 				break;
   10001 			case MII_MODEL_INTEL_I217:
   10002 				new_phytype = WMPHY_I217;
   10003 				break;
   10004 			case MII_MODEL_INTEL_I82580:
   10005 			case MII_MODEL_INTEL_I350:
   10006 				new_phytype = WMPHY_82580;
   10007 				break;
   10008 			default:
   10009 				break;
   10010 			}
   10011 			break;
   10012 		case MII_OUI_yyINTEL:
   10013 			switch (phy_model) {
   10014 			case MII_MODEL_yyINTEL_I82562G:
   10015 			case MII_MODEL_yyINTEL_I82562EM:
   10016 			case MII_MODEL_yyINTEL_I82562ET:
   10017 				new_phytype = WMPHY_IFE;
   10018 				break;
   10019 			case MII_MODEL_yyINTEL_IGP01E1000:
   10020 				new_phytype = WMPHY_IGP;
   10021 				break;
   10022 			case MII_MODEL_yyINTEL_I82566:
   10023 				new_phytype = WMPHY_IGP_3;
   10024 				break;
   10025 			default:
   10026 				break;
   10027 			}
   10028 			break;
   10029 		default:
   10030 			break;
   10031 		}
   10032 		if (new_phytype == WMPHY_UNKNOWN)
   10033 			aprint_verbose_dev(dev,
   10034 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10035 			    __func__, phy_oui, phy_model);
   10036 
   10037 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10038 		    && (sc->sc_phytype != new_phytype)) {
    10039 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10040 			    " was incorrect. PHY type from PHY ID = %u\n",
   10041 			    sc->sc_phytype, new_phytype);
   10042 		}
   10043 	}
   10044 
   10045 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10046 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10047 		/* SGMII */
   10048 		new_readreg = wm_sgmii_readreg;
   10049 		new_writereg = wm_sgmii_writereg;
    10050 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   10051 		/* BM2 (phyaddr == 1) */
   10052 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10053 		    && (new_phytype != WMPHY_BM)
   10054 		    && (new_phytype != WMPHY_UNKNOWN))
   10055 			doubt_phytype = new_phytype;
   10056 		new_phytype = WMPHY_BM;
   10057 		new_readreg = wm_gmii_bm_readreg;
   10058 		new_writereg = wm_gmii_bm_writereg;
   10059 	} else if (sc->sc_type >= WM_T_PCH) {
   10060 		/* All PCH* use _hv_ */
   10061 		new_readreg = wm_gmii_hv_readreg;
   10062 		new_writereg = wm_gmii_hv_writereg;
   10063 	} else if (sc->sc_type >= WM_T_ICH8) {
   10064 		/* non-82567 ICH8, 9 and 10 */
   10065 		new_readreg = wm_gmii_i82544_readreg;
   10066 		new_writereg = wm_gmii_i82544_writereg;
   10067 	} else if (sc->sc_type >= WM_T_80003) {
   10068 		/* 80003 */
   10069 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10070 		    && (new_phytype != WMPHY_GG82563)
   10071 		    && (new_phytype != WMPHY_UNKNOWN))
   10072 			doubt_phytype = new_phytype;
   10073 		new_phytype = WMPHY_GG82563;
   10074 		new_readreg = wm_gmii_i80003_readreg;
   10075 		new_writereg = wm_gmii_i80003_writereg;
   10076 	} else if (sc->sc_type >= WM_T_I210) {
   10077 		/* I210 and I211 */
   10078 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10079 		    && (new_phytype != WMPHY_I210)
   10080 		    && (new_phytype != WMPHY_UNKNOWN))
   10081 			doubt_phytype = new_phytype;
   10082 		new_phytype = WMPHY_I210;
   10083 		new_readreg = wm_gmii_gs40g_readreg;
   10084 		new_writereg = wm_gmii_gs40g_writereg;
   10085 	} else if (sc->sc_type >= WM_T_82580) {
   10086 		/* 82580, I350 and I354 */
   10087 		new_readreg = wm_gmii_82580_readreg;
   10088 		new_writereg = wm_gmii_82580_writereg;
   10089 	} else if (sc->sc_type >= WM_T_82544) {
    10090 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10091 		new_readreg = wm_gmii_i82544_readreg;
   10092 		new_writereg = wm_gmii_i82544_writereg;
   10093 	} else {
   10094 		new_readreg = wm_gmii_i82543_readreg;
   10095 		new_writereg = wm_gmii_i82543_writereg;
   10096 	}
   10097 
   10098 	if (new_phytype == WMPHY_BM) {
   10099 		/* All BM use _bm_ */
   10100 		new_readreg = wm_gmii_bm_readreg;
   10101 		new_writereg = wm_gmii_bm_writereg;
   10102 	}
   10103 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10104 		/* All PCH* use _hv_ */
   10105 		new_readreg = wm_gmii_hv_readreg;
   10106 		new_writereg = wm_gmii_hv_writereg;
   10107 	}
   10108 
   10109 	/* Diag output */
   10110 	if (doubt_phytype != WMPHY_UNKNOWN)
   10111 		aprint_error_dev(dev, "Assumed new PHY type was "
   10112 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10113 		    new_phytype);
   10114 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10115 	    && (sc->sc_phytype != new_phytype))
    10116 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10117 		    " was incorrect. New PHY type = %u\n",
   10118 		    sc->sc_phytype, new_phytype);
   10119 
   10120 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10121 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10122 
   10123 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10124 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10125 		    "function was incorrect.\n");
   10126 
   10127 	/* Update now */
   10128 	sc->sc_phytype = new_phytype;
   10129 	mii->mii_readreg = new_readreg;
   10130 	mii->mii_writereg = new_writereg;
   10131 	if (new_readreg == wm_gmii_hv_readreg) {
   10132 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10133 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10134 	} else if (new_readreg == wm_sgmii_readreg) {
   10135 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10136 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10137 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10138 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10139 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10140 	}
   10141 }
   10142 
   10143 /*
   10144  * wm_get_phy_id_82575:
   10145  *
    10146  *	Return the PHY ID, or -1 on failure.
   10147  */
   10148 static int
   10149 wm_get_phy_id_82575(struct wm_softc *sc)
   10150 {
   10151 	uint32_t reg;
   10152 	int phyid = -1;
   10153 
   10154 	/* XXX */
   10155 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10156 		return -1;
   10157 
   10158 	if (wm_sgmii_uses_mdio(sc)) {
   10159 		switch (sc->sc_type) {
   10160 		case WM_T_82575:
   10161 		case WM_T_82576:
   10162 			reg = CSR_READ(sc, WMREG_MDIC);
   10163 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10164 			break;
   10165 		case WM_T_82580:
   10166 		case WM_T_I350:
   10167 		case WM_T_I354:
   10168 		case WM_T_I210:
   10169 		case WM_T_I211:
   10170 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10171 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10172 			break;
   10173 		default:
   10174 			return -1;
   10175 		}
   10176 	}
   10177 
   10178 	return phyid;
   10179 }
    10180 
   10182 /*
   10183  * wm_gmii_mediainit:
   10184  *
   10185  *	Initialize media for use on 1000BASE-T devices.
   10186  */
   10187 static void
   10188 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10189 {
   10190 	device_t dev = sc->sc_dev;
   10191 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10192 	struct mii_data *mii = &sc->sc_mii;
   10193 	uint32_t reg;
   10194 
   10195 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10196 		device_xname(sc->sc_dev), __func__));
   10197 
   10198 	/* We have GMII. */
   10199 	sc->sc_flags |= WM_F_HAS_MII;
   10200 
   10201 	if (sc->sc_type == WM_T_80003)
    10202 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10203 	else
   10204 		sc->sc_tipg = TIPG_1000T_DFLT;
   10205 
   10206 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10207 	if ((sc->sc_type == WM_T_82580)
   10208 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10209 	    || (sc->sc_type == WM_T_I211)) {
   10210 		reg = CSR_READ(sc, WMREG_PHPM);
   10211 		reg &= ~PHPM_GO_LINK_D;
   10212 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10213 	}
   10214 
   10215 	/*
   10216 	 * Let the chip set speed/duplex on its own based on
   10217 	 * signals from the PHY.
   10218 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10219 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10220 	 */
   10221 	sc->sc_ctrl |= CTRL_SLU;
   10222 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10223 
   10224 	/* Initialize our media structures and probe the GMII. */
   10225 	mii->mii_ifp = ifp;
   10226 
   10227 	mii->mii_statchg = wm_gmii_statchg;
   10228 
   10229 	/* get PHY control from SMBus to PCIe */
   10230 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10231 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10232 	    || (sc->sc_type == WM_T_PCH_CNP))
   10233 		wm_init_phy_workarounds_pchlan(sc);
   10234 
   10235 	wm_gmii_reset(sc);
   10236 
   10237 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10238 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10239 	    wm_gmii_mediastatus);
   10240 
   10241 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10242 	    || (sc->sc_type == WM_T_82580)
   10243 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10244 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10245 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10246 			/* Attach only one port */
   10247 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10248 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10249 		} else {
   10250 			int i, id;
   10251 			uint32_t ctrl_ext;
   10252 
   10253 			id = wm_get_phy_id_82575(sc);
   10254 			if (id != -1) {
   10255 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10256 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10257 			}
   10258 			if ((id == -1)
   10259 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10260 				/* Power on sgmii phy if it is disabled */
   10261 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10262 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10263 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10264 				CSR_WRITE_FLUSH(sc);
   10265 				delay(300*1000); /* XXX too long */
   10266 
    10267 				/* Try PHY addresses 1 through 7 */
   10268 				for (i = 1; i < 8; i++)
   10269 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10270 					    0xffffffff, i, MII_OFFSET_ANY,
   10271 					    MIIF_DOPAUSE);
   10272 
   10273 				/* Restore previous sfp cage power state */
   10274 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10275 			}
   10276 		}
   10277 	} else
   10278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10280 
   10281 	/*
    10282 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    10283 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10284 	 */
   10285 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10286 		|| (sc->sc_type == WM_T_PCH_SPT)
   10287 		|| (sc->sc_type == WM_T_PCH_CNP))
   10288 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10289 		wm_set_mdio_slow_mode_hv(sc);
   10290 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10291 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10292 	}
   10293 
   10294 	/*
   10295 	 * (For ICH8 variants)
   10296 	 * If PHY detection failed, use BM's r/w function and retry.
   10297 	 */
   10298 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10299 		/* if failed, retry with *_bm_* */
   10300 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10301 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10302 		    sc->sc_phytype);
   10303 		sc->sc_phytype = WMPHY_BM;
   10304 		mii->mii_readreg = wm_gmii_bm_readreg;
   10305 		mii->mii_writereg = wm_gmii_bm_writereg;
   10306 
   10307 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10308 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10309 	}
   10310 
   10311 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10312 		/* No PHY was found */
   10313 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10314 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10315 		sc->sc_phytype = WMPHY_NONE;
   10316 	} else {
   10317 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10318 
   10319 		/*
    10320 		 * A PHY was found. Check the PHY type again with the second
    10321 		 * call of wm_gmii_setup_phytype().
   10322 		 */
   10323 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10324 		    child->mii_mpd_model);
   10325 
   10326 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10327 	}
   10328 }
   10329 
   10330 /*
   10331  * wm_gmii_mediachange:	[ifmedia interface function]
   10332  *
   10333  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10334  */
   10335 static int
   10336 wm_gmii_mediachange(struct ifnet *ifp)
   10337 {
   10338 	struct wm_softc *sc = ifp->if_softc;
   10339 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10340 	int rc;
   10341 
   10342 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10343 		device_xname(sc->sc_dev), __func__));
   10344 	if ((ifp->if_flags & IFF_UP) == 0)
   10345 		return 0;
   10346 
   10347 	/* Disable D0 LPLU. */
   10348 	wm_lplu_d0_disable(sc);
   10349 
   10350 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10351 	sc->sc_ctrl |= CTRL_SLU;
   10352 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10353 	    || (sc->sc_type > WM_T_82543)) {
   10354 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10355 	} else {
   10356 		sc->sc_ctrl &= ~CTRL_ASDE;
   10357 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10358 		if (ife->ifm_media & IFM_FDX)
   10359 			sc->sc_ctrl |= CTRL_FD;
   10360 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10361 		case IFM_10_T:
   10362 			sc->sc_ctrl |= CTRL_SPEED_10;
   10363 			break;
   10364 		case IFM_100_TX:
   10365 			sc->sc_ctrl |= CTRL_SPEED_100;
   10366 			break;
   10367 		case IFM_1000_T:
   10368 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10369 			break;
   10370 		case IFM_NONE:
   10371 			/* There is no specific setting for IFM_NONE */
   10372 			break;
   10373 		default:
   10374 			panic("wm_gmii_mediachange: bad media 0x%x",
   10375 			    ife->ifm_media);
   10376 		}
   10377 	}
   10378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10379 	CSR_WRITE_FLUSH(sc);
   10380 	if (sc->sc_type <= WM_T_82543)
   10381 		wm_gmii_reset(sc);
   10382 
   10383 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10384 		return 0;
   10385 	return rc;
   10386 }
   10387 
   10388 /*
   10389  * wm_gmii_mediastatus:	[ifmedia interface function]
   10390  *
   10391  *	Get the current interface media status on a 1000BASE-T device.
   10392  */
   10393 static void
   10394 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10395 {
   10396 	struct wm_softc *sc = ifp->if_softc;
   10397 
   10398 	ether_mediastatus(ifp, ifmr);
   10399 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10400 	    | sc->sc_flowflags;
   10401 }
   10402 
   10403 #define	MDI_IO		CTRL_SWDPIN(2)
   10404 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10405 #define	MDI_CLK		CTRL_SWDPIN(3)
   10406 
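/*
 * The i82543 accesses its PHY not through the MDIC register but by
 * bit-banging MII management frames over the software-definable pins:
 * MDI_IO carries the data, MDI_CLK is pulsed around each bit, and
 * MDI_DIR turns the data pin around (host -> PHY for writes).
 */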
   10407 static void
   10408 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10409 {
   10410 	uint32_t i, v;
   10411 
   10412 	v = CSR_READ(sc, WMREG_CTRL);
   10413 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10414 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10415 
   10416 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10417 		if (data & i)
   10418 			v |= MDI_IO;
   10419 		else
   10420 			v &= ~MDI_IO;
   10421 		CSR_WRITE(sc, WMREG_CTRL, v);
   10422 		CSR_WRITE_FLUSH(sc);
   10423 		delay(10);
   10424 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10425 		CSR_WRITE_FLUSH(sc);
   10426 		delay(10);
   10427 		CSR_WRITE(sc, WMREG_CTRL, v);
   10428 		CSR_WRITE_FLUSH(sc);
   10429 		delay(10);
   10430 	}
   10431 }
   10432 
   10433 static uint16_t
   10434 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10435 {
   10436 	uint32_t v, i;
   10437 	uint16_t data = 0;
   10438 
   10439 	v = CSR_READ(sc, WMREG_CTRL);
   10440 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10441 	v |= CTRL_SWDPIO(3);
   10442 
   10443 	CSR_WRITE(sc, WMREG_CTRL, v);
   10444 	CSR_WRITE_FLUSH(sc);
   10445 	delay(10);
   10446 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10447 	CSR_WRITE_FLUSH(sc);
   10448 	delay(10);
   10449 	CSR_WRITE(sc, WMREG_CTRL, v);
   10450 	CSR_WRITE_FLUSH(sc);
   10451 	delay(10);
   10452 
   10453 	for (i = 0; i < 16; i++) {
   10454 		data <<= 1;
   10455 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10456 		CSR_WRITE_FLUSH(sc);
   10457 		delay(10);
   10458 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10459 			data |= 1;
   10460 		CSR_WRITE(sc, WMREG_CTRL, v);
   10461 		CSR_WRITE_FLUSH(sc);
   10462 		delay(10);
   10463 	}
   10464 
   10465 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10466 	CSR_WRITE_FLUSH(sc);
   10467 	delay(10);
   10468 	CSR_WRITE(sc, WMREG_CTRL, v);
   10469 	CSR_WRITE_FLUSH(sc);
   10470 	delay(10);
   10471 
   10472 	return data;
   10473 }
   10474 
   10475 #undef MDI_IO
   10476 #undef MDI_DIR
   10477 #undef MDI_CLK
   10478 
   10479 /*
   10480  * wm_gmii_i82543_readreg:	[mii interface function]
   10481  *
   10482  *	Read a PHY register on the GMII (i82543 version).
   10483  */
   10484 static int
   10485 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10486 {
   10487 	struct wm_softc *sc = device_private(dev);
   10488 
   10489 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10490 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10491 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10492 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10493 
   10494 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10495 		device_xname(dev), phy, reg, *val));
   10496 
   10497 	return 0;
   10498 }
   10499 
   10500 /*
   10501  * wm_gmii_i82543_writereg:	[mii interface function]
   10502  *
   10503  *	Write a PHY register on the GMII (i82543 version).
   10504  */
   10505 static int
   10506 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10507 {
   10508 	struct wm_softc *sc = device_private(dev);
   10509 
   10510 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10511 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10512 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10513 	    (MII_COMMAND_START << 30), 32);
   10514 
   10515 	return 0;
   10516 }
   10517 
   10518 /*
   10519  * wm_gmii_mdic_readreg:	[mii interface function]
   10520  *
   10521  *	Read a PHY register on the GMII.
   10522  */
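/*
 * A transaction is started by writing WMREG_MDIC and then polling for
 * MDIC_READY, sleeping 50us between polls, for at most
 * WM_GEN_POLL_TIMEOUT * 3 iterations. MDIC_E reports an error from the
 * MAC, which is normal when no PHY responds at the given address.
 */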
   10523 static int
   10524 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10525 {
   10526 	struct wm_softc *sc = device_private(dev);
   10527 	uint32_t mdic = 0;
   10528 	int i;
   10529 
   10530 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10531 	    && (reg > MII_ADDRMASK)) {
   10532 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10533 		    __func__, sc->sc_phytype, reg);
   10534 		reg &= MII_ADDRMASK;
   10535 	}
   10536 
   10537 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10538 	    MDIC_REGADD(reg));
   10539 
   10540 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10541 		delay(50);
   10542 		mdic = CSR_READ(sc, WMREG_MDIC);
   10543 		if (mdic & MDIC_READY)
   10544 			break;
   10545 	}
   10546 
   10547 	if ((mdic & MDIC_READY) == 0) {
   10548 		DPRINTF(WM_DEBUG_GMII,
   10549 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10550 			device_xname(dev), phy, reg));
   10551 		return ETIMEDOUT;
   10552 	} else if (mdic & MDIC_E) {
   10553 		/* This is normal if no PHY is present. */
   10554 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10555 			device_xname(sc->sc_dev), phy, reg));
   10556 		return -1;
   10557 	} else
   10558 		*val = MDIC_DATA(mdic);
   10559 
   10560 	/*
   10561 	 * Allow some time after each MDIC transaction to avoid
   10562 	 * reading duplicate data in the next MDIC transaction.
   10563 	 */
   10564 	if (sc->sc_type == WM_T_PCH2)
   10565 		delay(100);
   10566 
   10567 	return 0;
   10568 }
   10569 
   10570 /*
   10571  * wm_gmii_mdic_writereg:	[mii interface function]
   10572  *
   10573  *	Write a PHY register on the GMII.
   10574  */
   10575 static int
   10576 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10577 {
   10578 	struct wm_softc *sc = device_private(dev);
   10579 	uint32_t mdic = 0;
   10580 	int i;
   10581 
   10582 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10583 	    && (reg > MII_ADDRMASK)) {
   10584 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10585 		    __func__, sc->sc_phytype, reg);
   10586 		reg &= MII_ADDRMASK;
   10587 	}
   10588 
   10589 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10590 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10591 
   10592 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10593 		delay(50);
   10594 		mdic = CSR_READ(sc, WMREG_MDIC);
   10595 		if (mdic & MDIC_READY)
   10596 			break;
   10597 	}
   10598 
   10599 	if ((mdic & MDIC_READY) == 0) {
   10600 		DPRINTF(WM_DEBUG_GMII,
   10601 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10602 			device_xname(dev), phy, reg));
   10603 		return ETIMEDOUT;
   10604 	} else if (mdic & MDIC_E) {
   10605 		DPRINTF(WM_DEBUG_GMII,
   10606 		    ("%s: MDIC write error: phy %d reg %d\n",
   10607 			device_xname(dev), phy, reg));
   10608 		return -1;
   10609 	}
   10610 
   10611 	/*
   10612 	 * Allow some time after each MDIC transaction to avoid
   10613 	 * reading duplicate data in the next MDIC transaction.
   10614 	 */
   10615 	if (sc->sc_type == WM_T_PCH2)
   10616 		delay(100);
   10617 
   10618 	return 0;
   10619 }
   10620 
   10621 /*
   10622  * wm_gmii_i82544_readreg:	[mii interface function]
   10623  *
   10624  *	Read a PHY register on the GMII.
   10625  */
   10626 static int
   10627 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10628 {
   10629 	struct wm_softc *sc = device_private(dev);
   10630 	int rv;
   10631 
   10632 	if (sc->phy.acquire(sc)) {
   10633 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10634 		return -1;
   10635 	}
   10636 
   10637 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10638 
   10639 	sc->phy.release(sc);
   10640 
   10641 	return rv;
   10642 }
   10643 
   10644 static int
   10645 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10646 {
   10647 	struct wm_softc *sc = device_private(dev);
   10648 	int rv;
   10649 
   10650 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10651 		switch (sc->sc_phytype) {
   10652 		case WMPHY_IGP:
   10653 		case WMPHY_IGP_2:
   10654 		case WMPHY_IGP_3:
   10655 			rv = wm_gmii_mdic_writereg(dev, phy,
   10656 			    MII_IGPHY_PAGE_SELECT, reg);
   10657 			if (rv != 0)
   10658 				return rv;
   10659 			break;
   10660 		default:
   10661 #ifdef WM_DEBUG
   10662 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10663 			    __func__, sc->sc_phytype, reg);
   10664 #endif
   10665 			break;
   10666 		}
   10667 	}
   10668 
   10669 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10670 }
   10671 
   10672 /*
   10673  * wm_gmii_i82544_writereg:	[mii interface function]
   10674  *
   10675  *	Write a PHY register on the GMII.
   10676  */
   10677 static int
   10678 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10679 {
   10680 	struct wm_softc *sc = device_private(dev);
   10681 	int rv;
   10682 
   10683 	if (sc->phy.acquire(sc)) {
   10684 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10685 		return -1;
   10686 	}
   10687 
   10688 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10689 	sc->phy.release(sc);
   10690 
   10691 	return rv;
   10692 }
   10693 
   10694 static int
   10695 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10696 {
   10697 	struct wm_softc *sc = device_private(dev);
   10698 	int rv;
   10699 
   10700 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10701 		switch (sc->sc_phytype) {
   10702 		case WMPHY_IGP:
   10703 		case WMPHY_IGP_2:
   10704 		case WMPHY_IGP_3:
   10705 			rv = wm_gmii_mdic_writereg(dev, phy,
   10706 			    MII_IGPHY_PAGE_SELECT, reg);
   10707 			if (rv != 0)
   10708 				return rv;
   10709 			break;
   10710 		default:
   10711 #ifdef WM_DEBUG
    10712 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10713 			    __func__, sc->sc_phytype, reg);
   10714 #endif
   10715 			break;
   10716 		}
   10717 	}
   10718 
   10719 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10720 }
   10721 
   10722 /*
   10723  * wm_gmii_i80003_readreg:	[mii interface function]
   10724  *
    10725  *	Read a PHY register on the Kumeran bus.
    10726  * This could be handled by the PHY layer if we didn't have to lock the
    10727  * resource ...
   10728  */
   10729 static int
   10730 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10731 {
   10732 	struct wm_softc *sc = device_private(dev);
   10733 	int page_select;
   10734 	uint16_t temp, temp2;
   10735 	int rv = 0;
   10736 
   10737 	if (phy != 1) /* Only one PHY on kumeran bus */
   10738 		return -1;
   10739 
   10740 	if (sc->phy.acquire(sc)) {
   10741 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10742 		return -1;
   10743 	}
   10744 
   10745 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10746 		page_select = GG82563_PHY_PAGE_SELECT;
   10747 	else {
   10748 		/*
   10749 		 * Use Alternative Page Select register to access registers
   10750 		 * 30 and 31.
   10751 		 */
   10752 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10753 	}
   10754 	temp = reg >> GG82563_PAGE_SHIFT;
   10755 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10756 		goto out;
   10757 
   10758 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10759 		 * Wait another 200us to work around a bug in the ready bit
    10760 		 * of the MDIC register.
   10761 		 * register.
   10762 		 */
   10763 		delay(200);
   10764 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10765 		if ((rv != 0) || (temp2 != temp)) {
   10766 			device_printf(dev, "%s failed\n", __func__);
   10767 			rv = -1;
   10768 			goto out;
   10769 		}
   10770 		delay(200);
   10771 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10772 		delay(200);
   10773 	} else
   10774 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10775 
   10776 out:
   10777 	sc->phy.release(sc);
   10778 	return rv;
   10779 }
   10780 
   10781 /*
   10782  * wm_gmii_i80003_writereg:	[mii interface function]
   10783  *
    10784  *	Write a PHY register on the Kumeran bus.
    10785  * This could be handled by the PHY layer if we didn't have to lock the
    10786  * resource ...
   10787  */
   10788 static int
   10789 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10790 {
   10791 	struct wm_softc *sc = device_private(dev);
   10792 	int page_select, rv;
   10793 	uint16_t temp, temp2;
   10794 
   10795 	if (phy != 1) /* Only one PHY on kumeran bus */
   10796 		return -1;
   10797 
   10798 	if (sc->phy.acquire(sc)) {
   10799 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10800 		return -1;
   10801 	}
   10802 
   10803 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10804 		page_select = GG82563_PHY_PAGE_SELECT;
   10805 	else {
   10806 		/*
   10807 		 * Use Alternative Page Select register to access registers
   10808 		 * 30 and 31.
   10809 		 */
   10810 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10811 	}
   10812 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10813 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10814 		goto out;
   10815 
   10816 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10817 		/*
    10818 		 * Wait another 200us to work around a bug in the ready bit
    10819 		 * of the MDIC register.
   10820 		 */
   10821 		delay(200);
   10822 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10823 		if ((rv != 0) || (temp2 != temp)) {
   10824 			device_printf(dev, "%s failed\n", __func__);
   10825 			rv = -1;
   10826 			goto out;
   10827 		}
   10828 		delay(200);
   10829 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10830 		delay(200);
   10831 	} else
   10832 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10833 
   10834 out:
   10835 	sc->phy.release(sc);
   10836 	return rv;
   10837 }
   10838 
   10839 /*
   10840  * wm_gmii_bm_readreg:	[mii interface function]
   10841  *
    10842  *	Read a PHY register on the BM (82567) PHY.
    10843  * This could be handled by the PHY layer if we didn't have to lock the
    10844  * resource ...
   10845  */
   10846 static int
   10847 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10848 {
   10849 	struct wm_softc *sc = device_private(dev);
   10850 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10851 	int rv;
   10852 
   10853 	if (sc->phy.acquire(sc)) {
   10854 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10855 		return -1;
   10856 	}
   10857 
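	/*
	 * Page-select, port-control and wakeup registers answer only at
	 * PHY address 1 (see wm_enable_phy_wakeup_reg_access_bm()), so
	 * remap such accesses; the 82574/82583 are exempt here.
	 */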
   10858 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10859 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10860 		    || (reg == 31)) ? 1 : phy;
   10861 	/* Page 800 works differently than the rest so it has its own func */
   10862 	if (page == BM_WUC_PAGE) {
   10863 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10864 		goto release;
   10865 	}
   10866 
   10867 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10868 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10869 		    && (sc->sc_type != WM_T_82583))
   10870 			rv = wm_gmii_mdic_writereg(dev, phy,
   10871 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10872 		else
   10873 			rv = wm_gmii_mdic_writereg(dev, phy,
   10874 			    BME1000_PHY_PAGE_SELECT, page);
   10875 		if (rv != 0)
   10876 			goto release;
   10877 	}
   10878 
   10879 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10880 
   10881 release:
   10882 	sc->phy.release(sc);
   10883 	return rv;
   10884 }
   10885 
   10886 /*
   10887  * wm_gmii_bm_writereg:	[mii interface function]
   10888  *
    10889  *	Write a PHY register on the BM (82567) PHY.
    10890  * This could be handled by the PHY layer if we didn't have to lock the
    10891  * resource ...
   10892  */
   10893 static int
   10894 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10895 {
   10896 	struct wm_softc *sc = device_private(dev);
   10897 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10898 	int rv;
   10899 
   10900 	if (sc->phy.acquire(sc)) {
   10901 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10902 		return -1;
   10903 	}
   10904 
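	/* Same PHY address remap as in wm_gmii_bm_readreg() above. */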
   10905 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10906 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10907 		    || (reg == 31)) ? 1 : phy;
   10908 	/* Page 800 works differently than the rest so it has its own func */
   10909 	if (page == BM_WUC_PAGE) {
   10910 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10911 		goto release;
   10912 	}
   10913 
   10914 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10915 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10916 		    && (sc->sc_type != WM_T_82583))
   10917 			rv = wm_gmii_mdic_writereg(dev, phy,
   10918 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10919 		else
   10920 			rv = wm_gmii_mdic_writereg(dev, phy,
   10921 			    BME1000_PHY_PAGE_SELECT, page);
   10922 		if (rv != 0)
   10923 			goto release;
   10924 	}
   10925 
   10926 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10927 
   10928 release:
   10929 	sc->phy.release(sc);
   10930 	return rv;
   10931 }
   10932 
   10933 /*
   10934  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10935  *  @dev: pointer to the HW structure
   10936  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10937  *
   10938  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10939  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10940  */
   10941 static int
   10942 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10943 {
   10944 	uint16_t temp;
   10945 	int rv;
   10946 
   10947 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10948 		device_xname(dev), __func__));
   10949 
   10950 	if (!phy_regp)
   10951 		return -1;
   10952 
   10953 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10954 
   10955 	/* Select Port Control Registers page */
   10956 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10957 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10958 	if (rv != 0)
   10959 		return rv;
   10960 
   10961 	/* Read WUCE and save it */
   10962 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10963 	if (rv != 0)
   10964 		return rv;
   10965 
   10966 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10967 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10968 	 */
   10969 	temp = *phy_regp;
   10970 	temp |= BM_WUC_ENABLE_BIT;
   10971 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10972 
   10973 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10974 		return rv;
   10975 
   10976 	/* Select Host Wakeup Registers page - caller now able to write
   10977 	 * registers on the Wakeup registers page
   10978 	 */
   10979 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10980 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10981 }
   10982 
   10983 /*
   10984  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10985  *  @dev: pointer to the HW structure
   10986  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10987  *
   10988  *  Restore BM_WUC_ENABLE_REG to its original value.
   10989  *
   10990  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10991  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10992  *  caller.
   10993  */
   10994 static int
   10995 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10996 {
   10997 
   10998 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10999 		device_xname(dev), __func__));
   11000 
   11001 	if (!phy_regp)
   11002 		return -1;
   11003 
   11004 	/* Select Port Control Registers page */
   11005 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11006 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11007 
   11008 	/* Restore 769.17 to its original value */
   11009 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11010 
   11011 	return 0;
   11012 }
   11013 
   11014 /*
   11015  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11016  *  @dev: pointer to the device structure
   11017  *  @offset: register offset to be read or written
   11018  *  @val: pointer to the data to read or write
   11019  *  @rd: determines if operation is read or write
   11020  *  @page_set: BM_WUC_PAGE already set and access enabled
   11021  *
   11022  *  Read the PHY register at offset and store the retrieved information in
   11023  *  data, or write data to PHY register at offset.  Note the procedure to
   11024  *  access the PHY wakeup registers is different than reading the other PHY
   11025  *  registers. It works as such:
   11026  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11027  *  2) Set page to 800 for host access (801 for manageability)
   11028  *  3) Write the address using the address opcode (0x11)
   11029  *  4) Read or write the data using the data opcode (0x12)
   11030  *  5) Restore 769.17.2 to its original value
   11031  *
   11032  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11033  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11034  *
   11035  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11036  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11037  * is responsible for calling wm_{en,dis}able_phy_wakeup_reg_access_bm()).
   11038  */
   11039 static int
   11040 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11041 	bool page_set)
   11042 {
   11043 	struct wm_softc *sc = device_private(dev);
   11044 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11045 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11046 	uint16_t wuce;
   11047 	int rv = 0;
   11048 
   11049 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11050 		device_xname(dev), __func__));
   11051 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11052 	if ((sc->sc_type == WM_T_PCH)
   11053 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11054 		device_printf(dev,
   11055 		    "Attempting to access page %d while gig enabled.\n", page);
   11056 	}
   11057 
   11058 	if (!page_set) {
   11059 		/* Enable access to PHY wakeup registers */
   11060 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11061 		if (rv != 0) {
   11062 			device_printf(dev,
   11063 			    "%s: Could not enable PHY wakeup reg access\n",
   11064 			    __func__);
   11065 			return rv;
   11066 		}
   11067 	}
   11068 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11069 		device_xname(sc->sc_dev), __func__, page, regnum));
   11070 
    11071 	/*
    11072 	 * Steps 3) and 4) of the procedure in the function header:
    11073 	 * write the register address, then read or write the data.
    11074 	 */
   11075 
   11076 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11077 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11078 	if (rv != 0)
   11079 		return rv;
   11080 
   11081 	if (rd) {
   11082 		/* Read the Wakeup register page value using opcode 0x12 */
   11083 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11084 	} else {
   11085 		/* Write the Wakeup register page value using opcode 0x12 */
   11086 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11087 	}
   11088 	if (rv != 0)
   11089 		return rv;
   11090 
   11091 	if (!page_set)
   11092 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11093 
   11094 	return rv;
   11095 }
   11096 
   11097 /*
   11098  * wm_gmii_hv_readreg:	[mii interface function]
   11099  *
    11100  *	Read a PHY register on the HV (PCH and newer) PHY.
    11101  * This could be handled by the PHY layer if we didn't have to lock the
    11102  * resource ...
   11103  */
   11104 static int
   11105 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11106 {
   11107 	struct wm_softc *sc = device_private(dev);
   11108 	int rv;
   11109 
   11110 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11111 		device_xname(dev), __func__));
   11112 	if (sc->phy.acquire(sc)) {
   11113 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11114 		return -1;
   11115 	}
   11116 
   11117 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11118 	sc->phy.release(sc);
   11119 	return rv;
   11120 }
   11121 
   11122 static int
   11123 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11124 {
   11125 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11126 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11127 	int rv;
   11128 
   11129 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11130 
   11131 	/* Page 800 works differently than the rest so it has its own func */
   11132 	if (page == BM_WUC_PAGE)
   11133 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11134 
   11135 	/*
    11136 	 * Pages 1 through 767 work differently than the rest and are
    11137 	 * not handled here.
   11138 	 */
   11139 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11140 		printf("gmii_hv_readreg!!!\n");
   11141 		return -1;
   11142 	}
   11143 
   11144 	/*
   11145 	 * XXX I21[789] documents say that the SMBus Address register is at
   11146 	 * PHY address 01, Page 0 (not 768), Register 26.
   11147 	 */
   11148 	if (page == HV_INTC_FC_PAGE_START)
   11149 		page = 0;
   11150 
   11151 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11152 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11153 		    page << BME1000_PAGE_SHIFT);
   11154 		if (rv != 0)
   11155 			return rv;
   11156 	}
   11157 
   11158 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11159 }
   11160 
   11161 /*
   11162  * wm_gmii_hv_writereg:	[mii interface function]
   11163  *
   11164  *	Write a PHY register on the kumeran.
   11165  * This could be handled by the PHY layer if we didn't have to lock the
    11166  * resource ...
   11167  */
   11168 static int
   11169 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11170 {
   11171 	struct wm_softc *sc = device_private(dev);
   11172 	int rv;
   11173 
   11174 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11175 		device_xname(dev), __func__));
   11176 
   11177 	if (sc->phy.acquire(sc)) {
   11178 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11179 		return -1;
   11180 	}
   11181 
   11182 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11183 	sc->phy.release(sc);
   11184 
   11185 	return rv;
   11186 }
   11187 
   11188 static int
   11189 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11190 {
   11191 	struct wm_softc *sc = device_private(dev);
   11192 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11193 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11194 	int rv;
   11195 
   11196 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11197 
   11198 	/* Page 800 works differently than the rest so it has its own func */
   11199 	if (page == BM_WUC_PAGE)
   11200 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11201 		    false);
   11202 
   11203 	/*
   11204 	 * Lower than page 768 works differently than the rest so it has its
   11205 	 * own func
   11206 	 */
   11207 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11208 		printf("gmii_hv_writereg!!!\n");
   11209 		return -1;
   11210 	}
   11211 
   11212 	{
   11213 		/*
   11214 		 * XXX I21[789] documents say that the SMBus Address register
   11215 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11216 		 */
   11217 		if (page == HV_INTC_FC_PAGE_START)
   11218 			page = 0;
   11219 
   11220 		/*
   11221 		 * XXX Workaround MDIO accesses being disabled after entering
   11222 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11223 		 * register is set)
   11224 		 */
   11225 		if (sc->sc_phytype == WMPHY_82578) {
   11226 			struct mii_softc *child;
   11227 
   11228 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11229 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11230 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11231 			    && ((val & (1 << 11)) != 0)) {
   11232 				printf("XXX need workaround\n");
   11233 			}
   11234 		}
   11235 
   11236 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11237 			rv = wm_gmii_mdic_writereg(dev, 1,
   11238 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11239 			if (rv != 0)
   11240 				return rv;
   11241 		}
   11242 	}
   11243 
   11244 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11245 }
   11246 
   11247 /*
   11248  * wm_gmii_82580_readreg:	[mii interface function]
   11249  *
   11250  *	Read a PHY register on the 82580 and I350.
   11251  * This could be handled by the PHY layer if we didn't have to lock the
    11252  * resource ...
   11253  */
   11254 static int
   11255 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11256 {
   11257 	struct wm_softc *sc = device_private(dev);
   11258 	int rv;
   11259 
   11260 	if (sc->phy.acquire(sc) != 0) {
   11261 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11262 		return -1;
   11263 	}
   11264 
   11265 #ifdef DIAGNOSTIC
   11266 	if (reg > MII_ADDRMASK) {
   11267 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11268 		    __func__, sc->sc_phytype, reg);
   11269 		reg &= MII_ADDRMASK;
   11270 	}
   11271 #endif
   11272 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11273 
   11274 	sc->phy.release(sc);
   11275 	return rv;
   11276 }
   11277 
   11278 /*
   11279  * wm_gmii_82580_writereg:	[mii interface function]
   11280  *
   11281  *	Write a PHY register on the 82580 and I350.
   11282  * This could be handled by the PHY layer if we didn't have to lock the
   11283  * ressource ...
    11284  * resource ...
   11285 static int
   11286 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11287 {
   11288 	struct wm_softc *sc = device_private(dev);
   11289 	int rv;
   11290 
   11291 	if (sc->phy.acquire(sc) != 0) {
   11292 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11293 		return -1;
   11294 	}
   11295 
   11296 #ifdef DIAGNOSTIC
   11297 	if (reg > MII_ADDRMASK) {
   11298 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11299 		    __func__, sc->sc_phytype, reg);
   11300 		reg &= MII_ADDRMASK;
   11301 	}
   11302 #endif
   11303 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11304 
   11305 	sc->phy.release(sc);
   11306 	return rv;
   11307 }
   11308 
   11309 /*
   11310  * wm_gmii_gs40g_readreg:	[mii interface function]
   11311  *
    11312  *	Read a PHY register on the I210 and I211.
    11313  * This could be handled by the PHY layer if we didn't have to lock the
    11314  * resource ...
   11315  */
   11316 static int
   11317 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11318 {
   11319 	struct wm_softc *sc = device_private(dev);
   11320 	int page, offset;
   11321 	int rv;
   11322 
   11323 	/* Acquire semaphore */
   11324 	if (sc->phy.acquire(sc)) {
   11325 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11326 		return -1;
   11327 	}
   11328 
   11329 	/* Page select */
   11330 	page = reg >> GS40G_PAGE_SHIFT;
   11331 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11332 	if (rv != 0)
   11333 		goto release;
   11334 
   11335 	/* Read reg */
   11336 	offset = reg & GS40G_OFFSET_MASK;
   11337 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11338 
   11339 release:
   11340 	sc->phy.release(sc);
   11341 	return rv;
   11342 }
   11343 
   11344 /*
   11345  * wm_gmii_gs40g_writereg:	[mii interface function]
   11346  *
   11347  *	Write a PHY register on the I210 and I211.
   11348  * This could be handled by the PHY layer if we didn't have to lock the
   11349  * ressource ...
   11350  */
   11351 static int
   11352 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11353 {
   11354 	struct wm_softc *sc = device_private(dev);
   11355 	uint16_t page;
   11356 	int offset, rv;
   11357 
   11358 	/* Acquire semaphore */
   11359 	if (sc->phy.acquire(sc)) {
   11360 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11361 		return -1;
   11362 	}
   11363 
   11364 	/* Page select */
   11365 	page = reg >> GS40G_PAGE_SHIFT;
   11366 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11367 	if (rv != 0)
   11368 		goto release;
   11369 
   11370 	/* Write reg */
   11371 	offset = reg & GS40G_OFFSET_MASK;
   11372 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11373 
   11374 release:
   11375 	/* Release semaphore */
   11376 	sc->phy.release(sc);
   11377 	return rv;
   11378 }
   11379 
   11380 /*
   11381  * wm_gmii_statchg:	[mii interface function]
   11382  *
   11383  *	Callback from MII layer when media changes.
   11384  */
   11385 static void
   11386 wm_gmii_statchg(struct ifnet *ifp)
   11387 {
   11388 	struct wm_softc *sc = ifp->if_softc;
   11389 	struct mii_data *mii = &sc->sc_mii;
   11390 
   11391 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11392 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11393 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11394 
   11395 	/* Get flow control negotiation result. */
   11396 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11397 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11398 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11399 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11400 	}
   11401 
   11402 	if (sc->sc_flowflags & IFM_FLOW) {
   11403 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11404 			sc->sc_ctrl |= CTRL_TFCE;
   11405 			sc->sc_fcrtl |= FCRTL_XONE;
   11406 		}
   11407 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11408 			sc->sc_ctrl |= CTRL_RFCE;
   11409 	}
   11410 
   11411 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11412 		DPRINTF(WM_DEBUG_LINK,
   11413 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11414 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11415 	} else {
   11416 		DPRINTF(WM_DEBUG_LINK,
   11417 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11418 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11419 	}
   11420 
   11421 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11422 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11423 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11424 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11425 	if (sc->sc_type == WM_T_80003) {
   11426 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11427 		case IFM_1000_T:
   11428 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11429 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11430 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11431 			break;
   11432 		default:
   11433 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11434 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11435 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11436 			break;
   11437 		}
   11438 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11439 	}
   11440 }
   11441 
   11442 /* kumeran related (80003, ICH* and PCH*) */
   11443 
   11444 /*
   11445  * wm_kmrn_readreg:
   11446  *
    11447  *	Read a kumeran register.
   11448  */
   11449 static int
   11450 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11451 {
   11452 	int rv;
   11453 
   11454 	if (sc->sc_type == WM_T_80003)
   11455 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11456 	else
   11457 		rv = sc->phy.acquire(sc);
   11458 	if (rv != 0) {
   11459 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11460 		    __func__);
   11461 		return rv;
   11462 	}
   11463 
   11464 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11465 
   11466 	if (sc->sc_type == WM_T_80003)
   11467 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11468 	else
   11469 		sc->phy.release(sc);
   11470 
   11471 	return rv;
   11472 }
   11473 
   11474 static int
   11475 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11476 {
   11477 
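          	/*
          	 * Issue the read by placing the register offset in KUMCTRLSTA
          	 * with the Read ENable bit set; the value is then read back
          	 * from the same register after a short delay.
          	 */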
   11478 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11479 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11480 	    KUMCTRLSTA_REN);
   11481 	CSR_WRITE_FLUSH(sc);
   11482 	delay(2);
   11483 
   11484 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11485 
   11486 	return 0;
   11487 }
   11488 
   11489 /*
   11490  * wm_kmrn_writereg:
   11491  *
    11492  *	Write a kumeran register.
   11493  */
   11494 static int
   11495 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11496 {
   11497 	int rv;
   11498 
   11499 	if (sc->sc_type == WM_T_80003)
   11500 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11501 	else
   11502 		rv = sc->phy.acquire(sc);
   11503 	if (rv != 0) {
   11504 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11505 		    __func__);
   11506 		return rv;
   11507 	}
   11508 
   11509 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11510 
   11511 	if (sc->sc_type == WM_T_80003)
   11512 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11513 	else
   11514 		sc->phy.release(sc);
   11515 
   11516 	return rv;
   11517 }
   11518 
   11519 static int
   11520 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11521 {
   11522 
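          	/* The offset and the 16-bit value go out in a single access */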
   11523 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11524 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11525 
   11526 	return 0;
   11527 }
   11528 
   11529 /*
    11530  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   11531  * This access method is different from IEEE MMD.
   11532  */
   11533 static int
   11534 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11535 {
   11536 	struct wm_softc *sc = device_private(dev);
   11537 	int rv;
   11538 
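          	/* Select the register via EMI_ADDR, then access it via EMI_DATA */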
   11539 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11540 	if (rv != 0)
   11541 		return rv;
   11542 
   11543 	if (rd)
   11544 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11545 	else
   11546 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11547 	return rv;
   11548 }
   11549 
   11550 static int
   11551 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11552 {
   11553 
   11554 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11555 }
   11556 
   11557 static int
   11558 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11559 {
   11560 
   11561 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11562 }
   11563 
   11564 /* SGMII related */
   11565 
   11566 /*
   11567  * wm_sgmii_uses_mdio
   11568  *
   11569  * Check whether the transaction is to the internal PHY or the external
   11570  * MDIO interface. Return true if it's MDIO.
   11571  */
   11572 static bool
   11573 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11574 {
   11575 	uint32_t reg;
   11576 	bool ismdio = false;
   11577 
   11578 	switch (sc->sc_type) {
   11579 	case WM_T_82575:
   11580 	case WM_T_82576:
   11581 		reg = CSR_READ(sc, WMREG_MDIC);
   11582 		ismdio = ((reg & MDIC_DEST) != 0);
   11583 		break;
   11584 	case WM_T_82580:
   11585 	case WM_T_I350:
   11586 	case WM_T_I354:
   11587 	case WM_T_I210:
   11588 	case WM_T_I211:
   11589 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11590 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11591 		break;
   11592 	default:
   11593 		break;
   11594 	}
   11595 
   11596 	return ismdio;
   11597 }
   11598 
   11599 /*
   11600  * wm_sgmii_readreg:	[mii interface function]
   11601  *
    11602  *	Read a PHY register on the SGMII.
    11603  * This could be handled by the PHY layer if we didn't have to lock the
    11604  * resource ...
   11605  */
   11606 static int
   11607 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11608 {
   11609 	struct wm_softc *sc = device_private(dev);
   11610 	int rv;
   11611 
   11612 	if (sc->phy.acquire(sc)) {
   11613 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11614 		return -1;
   11615 	}
   11616 
   11617 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11618 
   11619 	sc->phy.release(sc);
   11620 	return rv;
   11621 }
   11622 
   11623 static int
   11624 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11625 {
   11626 	struct wm_softc *sc = device_private(dev);
   11627 	uint32_t i2ccmd;
    11628 	int i, rv = 0;
   11629 
   11630 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11631 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11632 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11633 
   11634 	/* Poll the ready bit */
   11635 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11636 		delay(50);
   11637 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11638 		if (i2ccmd & I2CCMD_READY)
   11639 			break;
   11640 	}
   11641 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11642 		device_printf(dev, "I2CCMD Read did not complete\n");
   11643 		rv = ETIMEDOUT;
   11644 	}
   11645 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11646 		device_printf(dev, "I2CCMD Error bit set\n");
   11647 		rv = EIO;
   11648 	}
   11649 
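          	/* Swap the data bytes back from I2C order (cf. the write side) */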
   11650 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11651 
   11652 	return rv;
   11653 }
   11654 
   11655 /*
   11656  * wm_sgmii_writereg:	[mii interface function]
   11657  *
   11658  *	Write a PHY register on the SGMII.
   11659  * This could be handled by the PHY layer if we didn't have to lock the
    11660  * resource ...
   11661  */
   11662 static int
   11663 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11664 {
   11665 	struct wm_softc *sc = device_private(dev);
   11666 	int rv;
   11667 
   11668 	if (sc->phy.acquire(sc) != 0) {
   11669 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11670 		return -1;
   11671 	}
   11672 
   11673 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11674 
   11675 	sc->phy.release(sc);
   11676 
   11677 	return rv;
   11678 }
   11679 
   11680 static int
   11681 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11682 {
   11683 	struct wm_softc *sc = device_private(dev);
   11684 	uint32_t i2ccmd;
   11685 	uint16_t swapdata;
   11686 	int rv = 0;
   11687 	int i;
   11688 
   11689 	/* Swap the data bytes for the I2C interface */
   11690 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11691 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11692 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11693 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11694 
   11695 	/* Poll the ready bit */
   11696 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11697 		delay(50);
   11698 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11699 		if (i2ccmd & I2CCMD_READY)
   11700 			break;
   11701 	}
   11702 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11703 		device_printf(dev, "I2CCMD Write did not complete\n");
   11704 		rv = ETIMEDOUT;
   11705 	}
   11706 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11707 		device_printf(dev, "I2CCMD Error bit set\n");
   11708 		rv = EIO;
   11709 	}
   11710 
   11711 	return rv;
   11712 }
   11713 
   11714 /* TBI related */
   11715 
   11716 static bool
   11717 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11718 {
   11719 	bool sig;
   11720 
   11721 	sig = ctrl & CTRL_SWDPIN(1);
   11722 
   11723 	/*
   11724 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11725 	 * detect a signal, 1 if they don't.
   11726 	 */
   11727 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11728 		sig = !sig;
   11729 
   11730 	return sig;
   11731 }
   11732 
   11733 /*
   11734  * wm_tbi_mediainit:
   11735  *
   11736  *	Initialize media for use on 1000BASE-X devices.
   11737  */
   11738 static void
   11739 wm_tbi_mediainit(struct wm_softc *sc)
   11740 {
   11741 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11742 	const char *sep = "";
   11743 
   11744 	if (sc->sc_type < WM_T_82543)
   11745 		sc->sc_tipg = TIPG_WM_DFLT;
   11746 	else
   11747 		sc->sc_tipg = TIPG_LG_DFLT;
   11748 
   11749 	sc->sc_tbi_serdes_anegticks = 5;
   11750 
   11751 	/* Initialize our media structures */
   11752 	sc->sc_mii.mii_ifp = ifp;
   11753 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11754 
   11755 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11756 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11757 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11758 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11759 	else
   11760 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11761 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11762 
   11763 	/*
   11764 	 * SWD Pins:
   11765 	 *
   11766 	 *	0 = Link LED (output)
   11767 	 *	1 = Loss Of Signal (input)
   11768 	 */
   11769 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11770 
   11771 	/* XXX Perhaps this is only for TBI */
   11772 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11773 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11774 
   11775 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11776 		sc->sc_ctrl &= ~CTRL_LRST;
   11777 
   11778 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11779 
   11780 #define	ADD(ss, mm, dd)							\
   11781 do {									\
   11782 	aprint_normal("%s%s", sep, ss);					\
   11783 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11784 	sep = ", ";							\
   11785 } while (/*CONSTCOND*/0)
   11786 
   11787 	aprint_normal_dev(sc->sc_dev, "");
   11788 
   11789 	if (sc->sc_type == WM_T_I354) {
   11790 		uint32_t status;
   11791 
   11792 		status = CSR_READ(sc, WMREG_STATUS);
   11793 		if (((status & STATUS_2P5_SKU) != 0)
   11794 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11795 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11796 		} else
   11797 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11798 	} else if (sc->sc_type == WM_T_82545) {
   11799 		/* Only 82545 is LX (XXX except SFP) */
   11800 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11801 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11802 	} else {
   11803 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11804 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11805 	}
   11806 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11807 	aprint_normal("\n");
   11808 
   11809 #undef ADD
   11810 
   11811 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11812 }
   11813 
   11814 /*
   11815  * wm_tbi_mediachange:	[ifmedia interface function]
   11816  *
   11817  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11818  */
   11819 static int
   11820 wm_tbi_mediachange(struct ifnet *ifp)
   11821 {
   11822 	struct wm_softc *sc = ifp->if_softc;
   11823 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11824 	uint32_t status, ctrl;
   11825 	bool signal;
   11826 	int i;
   11827 
   11828 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11829 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11830 		/* XXX need some work for >= 82571 and < 82575 */
   11831 		if (sc->sc_type < WM_T_82575)
   11832 			return 0;
   11833 	}
   11834 
   11835 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11836 	    || (sc->sc_type >= WM_T_82575))
   11837 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11838 
   11839 	sc->sc_ctrl &= ~CTRL_LRST;
   11840 	sc->sc_txcw = TXCW_ANE;
   11841 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11842 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11843 	else if (ife->ifm_media & IFM_FDX)
   11844 		sc->sc_txcw |= TXCW_FD;
   11845 	else
   11846 		sc->sc_txcw |= TXCW_HD;
   11847 
   11848 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11849 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11850 
    11851 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11852 		device_xname(sc->sc_dev), sc->sc_txcw));
   11853 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11854 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11855 	CSR_WRITE_FLUSH(sc);
   11856 	delay(1000);
   11857 
   11858 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11859 	signal = wm_tbi_havesignal(sc, ctrl);
   11860 
   11861 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11862 		signal));
   11863 
   11864 	if (signal) {
   11865 		/* Have signal; wait for the link to come up. */
   11866 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11867 			delay(10000);
   11868 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11869 				break;
   11870 		}
   11871 
    11872 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   11873 			device_xname(sc->sc_dev), i));
   11874 
   11875 		status = CSR_READ(sc, WMREG_STATUS);
   11876 		DPRINTF(WM_DEBUG_LINK,
   11877 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11878 			device_xname(sc->sc_dev), status, STATUS_LU));
   11879 		if (status & STATUS_LU) {
   11880 			/* Link is up. */
   11881 			DPRINTF(WM_DEBUG_LINK,
   11882 			    ("%s: LINK: set media -> link up %s\n",
   11883 				device_xname(sc->sc_dev),
   11884 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11885 
   11886 			/*
    11887 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    11888 			 * automatically, so refresh sc->sc_ctrl from it.
   11889 			 */
   11890 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11891 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11892 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11893 			if (status & STATUS_FD)
   11894 				sc->sc_tctl |=
   11895 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11896 			else
   11897 				sc->sc_tctl |=
   11898 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11899 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11900 				sc->sc_fcrtl |= FCRTL_XONE;
   11901 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11902 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11903 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11904 			sc->sc_tbi_linkup = 1;
   11905 		} else {
   11906 			if (i == WM_LINKUP_TIMEOUT)
   11907 				wm_check_for_link(sc);
   11908 			/* Link is down. */
   11909 			DPRINTF(WM_DEBUG_LINK,
   11910 			    ("%s: LINK: set media -> link down\n",
   11911 				device_xname(sc->sc_dev)));
   11912 			sc->sc_tbi_linkup = 0;
   11913 		}
   11914 	} else {
   11915 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11916 			device_xname(sc->sc_dev)));
   11917 		sc->sc_tbi_linkup = 0;
   11918 	}
   11919 
   11920 	wm_tbi_serdes_set_linkled(sc);
   11921 
   11922 	return 0;
   11923 }
   11924 
   11925 /*
   11926  * wm_tbi_mediastatus:	[ifmedia interface function]
   11927  *
   11928  *	Get the current interface media status on a 1000BASE-X device.
   11929  */
   11930 static void
   11931 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11932 {
   11933 	struct wm_softc *sc = ifp->if_softc;
   11934 	uint32_t ctrl, status;
   11935 
   11936 	ifmr->ifm_status = IFM_AVALID;
   11937 	ifmr->ifm_active = IFM_ETHER;
   11938 
   11939 	status = CSR_READ(sc, WMREG_STATUS);
   11940 	if ((status & STATUS_LU) == 0) {
   11941 		ifmr->ifm_active |= IFM_NONE;
   11942 		return;
   11943 	}
   11944 
   11945 	ifmr->ifm_status |= IFM_ACTIVE;
   11946 	/* Only 82545 is LX */
   11947 	if (sc->sc_type == WM_T_82545)
   11948 		ifmr->ifm_active |= IFM_1000_LX;
   11949 	else
   11950 		ifmr->ifm_active |= IFM_1000_SX;
   11951 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11952 		ifmr->ifm_active |= IFM_FDX;
   11953 	else
   11954 		ifmr->ifm_active |= IFM_HDX;
   11955 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11956 	if (ctrl & CTRL_RFCE)
   11957 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11958 	if (ctrl & CTRL_TFCE)
   11959 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11960 }
   11961 
   11962 /* XXX TBI only */
   11963 static int
   11964 wm_check_for_link(struct wm_softc *sc)
   11965 {
   11966 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11967 	uint32_t rxcw;
   11968 	uint32_t ctrl;
   11969 	uint32_t status;
   11970 	bool signal;
   11971 
   11972 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11973 		device_xname(sc->sc_dev), __func__));
   11974 
   11975 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11976 		/* XXX need some work for >= 82571 */
   11977 		if (sc->sc_type >= WM_T_82571) {
   11978 			sc->sc_tbi_linkup = 1;
   11979 			return 0;
   11980 		}
   11981 	}
   11982 
   11983 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11984 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11985 	status = CSR_READ(sc, WMREG_STATUS);
   11986 	signal = wm_tbi_havesignal(sc, ctrl);
   11987 
   11988 	DPRINTF(WM_DEBUG_LINK,
   11989 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11990 		device_xname(sc->sc_dev), __func__, signal,
   11991 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11992 
   11993 	/*
   11994 	 * SWDPIN   LU RXCW
   11995 	 *	0    0	  0
   11996 	 *	0    0	  1	(should not happen)
   11997 	 *	0    1	  0	(should not happen)
   11998 	 *	0    1	  1	(should not happen)
    11999  *	1    0	  0	Disable autonegotiation and force link up
    12000  *	1    0	  1	Got /C/ but no link yet
    12001  *	1    1	  0	(link up)
    12002  *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   12003 	 *
   12004 	 */
   12005 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12006 		DPRINTF(WM_DEBUG_LINK,
   12007 		    ("%s: %s: force linkup and fullduplex\n",
   12008 			device_xname(sc->sc_dev), __func__));
   12009 		sc->sc_tbi_linkup = 0;
   12010 		/* Disable auto-negotiation in the TXCW register */
   12011 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12012 
   12013 		/*
   12014 		 * Force link-up and also force full-duplex.
   12015 		 *
    12016 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    12017 		 * automatically, so base sc->sc_ctrl on the value just read.
   12018 		 */
   12019 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12020 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12021 	} else if (((status & STATUS_LU) != 0)
   12022 	    && ((rxcw & RXCW_C) != 0)
   12023 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12024 		sc->sc_tbi_linkup = 1;
   12025 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12026 			device_xname(sc->sc_dev),
   12027 			__func__));
   12028 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12029 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12030 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12031 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12032 			device_xname(sc->sc_dev), __func__));
   12033 	} else {
   12034 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12035 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12036 			status));
   12037 	}
   12038 
   12039 	return 0;
   12040 }
   12041 
   12042 /*
   12043  * wm_tbi_tick:
   12044  *
   12045  *	Check the link on TBI devices.
   12046  *	This function acts as mii_tick().
   12047  */
   12048 static void
   12049 wm_tbi_tick(struct wm_softc *sc)
   12050 {
   12051 	struct mii_data *mii = &sc->sc_mii;
   12052 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12053 	uint32_t status;
   12054 
   12055 	KASSERT(WM_CORE_LOCKED(sc));
   12056 
   12057 	status = CSR_READ(sc, WMREG_STATUS);
   12058 
   12059 	/* XXX is this needed? */
   12060 	(void)CSR_READ(sc, WMREG_RXCW);
   12061 	(void)CSR_READ(sc, WMREG_CTRL);
   12062 
   12063 	/* set link status */
   12064 	if ((status & STATUS_LU) == 0) {
   12065 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12066 			device_xname(sc->sc_dev)));
   12067 		sc->sc_tbi_linkup = 0;
   12068 	} else if (sc->sc_tbi_linkup == 0) {
   12069 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12070 			device_xname(sc->sc_dev),
   12071 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12072 		sc->sc_tbi_linkup = 1;
   12073 		sc->sc_tbi_serdes_ticks = 0;
   12074 	}
   12075 
   12076 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12077 		goto setled;
   12078 
   12079 	if ((status & STATUS_LU) == 0) {
   12080 		sc->sc_tbi_linkup = 0;
   12081 		/* If the timer expired, retry autonegotiation */
   12082 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12083 		    && (++sc->sc_tbi_serdes_ticks
   12084 			>= sc->sc_tbi_serdes_anegticks)) {
   12085 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12086 			sc->sc_tbi_serdes_ticks = 0;
   12087 			/*
   12088 			 * Reset the link, and let autonegotiation do
   12089 			 * its thing
   12090 			 */
   12091 			sc->sc_ctrl |= CTRL_LRST;
   12092 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12093 			CSR_WRITE_FLUSH(sc);
   12094 			delay(1000);
   12095 			sc->sc_ctrl &= ~CTRL_LRST;
   12096 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12097 			CSR_WRITE_FLUSH(sc);
   12098 			delay(1000);
   12099 			CSR_WRITE(sc, WMREG_TXCW,
   12100 			    sc->sc_txcw & ~TXCW_ANE);
   12101 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12102 		}
   12103 	}
   12104 
   12105 setled:
   12106 	wm_tbi_serdes_set_linkled(sc);
   12107 }
   12108 
   12109 /* SERDES related */
   12110 static void
   12111 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12112 {
   12113 	uint32_t reg;
   12114 
   12115 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12116 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12117 		return;
   12118 
   12119 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12120 	reg |= PCS_CFG_PCS_EN;
   12121 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12122 
   12123 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12124 	reg &= ~CTRL_EXT_SWDPIN(3);
   12125 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12126 	CSR_WRITE_FLUSH(sc);
   12127 }
   12128 
   12129 static int
   12130 wm_serdes_mediachange(struct ifnet *ifp)
   12131 {
   12132 	struct wm_softc *sc = ifp->if_softc;
   12133 	bool pcs_autoneg = true; /* XXX */
   12134 	uint32_t ctrl_ext, pcs_lctl, reg;
   12135 
   12136 	/* XXX Currently, this function is not called on 8257[12] */
   12137 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12138 	    || (sc->sc_type >= WM_T_82575))
   12139 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12140 
   12141 	wm_serdes_power_up_link_82575(sc);
   12142 
   12143 	sc->sc_ctrl |= CTRL_SLU;
   12144 
   12145 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12146 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12147 
   12148 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12149 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12150 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12151 	case CTRL_EXT_LINK_MODE_SGMII:
   12152 		pcs_autoneg = true;
   12153 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12154 		break;
   12155 	case CTRL_EXT_LINK_MODE_1000KX:
   12156 		pcs_autoneg = false;
   12157 		/* FALLTHROUGH */
   12158 	default:
   12159 		if ((sc->sc_type == WM_T_82575)
   12160 		    || (sc->sc_type == WM_T_82576)) {
   12161 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12162 				pcs_autoneg = false;
   12163 		}
   12164 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12165 		    | CTRL_FRCFDX;
   12166 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12167 	}
   12168 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12169 
   12170 	if (pcs_autoneg) {
   12171 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12172 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12173 
   12174 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12175 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12176 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12177 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12178 	} else
   12179 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12180 
   12181 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    12182 
   12184 	return 0;
   12185 }
   12186 
   12187 static void
   12188 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12189 {
   12190 	struct wm_softc *sc = ifp->if_softc;
   12191 	struct mii_data *mii = &sc->sc_mii;
   12192 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12193 	uint32_t pcs_adv, pcs_lpab, reg;
   12194 
   12195 	ifmr->ifm_status = IFM_AVALID;
   12196 	ifmr->ifm_active = IFM_ETHER;
   12197 
   12198 	/* Check PCS */
   12199 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12200 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12201 		ifmr->ifm_active |= IFM_NONE;
   12202 		sc->sc_tbi_linkup = 0;
   12203 		goto setled;
   12204 	}
   12205 
   12206 	sc->sc_tbi_linkup = 1;
   12207 	ifmr->ifm_status |= IFM_ACTIVE;
   12208 	if (sc->sc_type == WM_T_I354) {
   12209 		uint32_t status;
   12210 
   12211 		status = CSR_READ(sc, WMREG_STATUS);
   12212 		if (((status & STATUS_2P5_SKU) != 0)
   12213 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12214 			ifmr->ifm_active |= IFM_2500_KX;
   12215 		} else
   12216 			ifmr->ifm_active |= IFM_1000_KX;
   12217 	} else {
   12218 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12219 		case PCS_LSTS_SPEED_10:
   12220 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12221 			break;
   12222 		case PCS_LSTS_SPEED_100:
   12223 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12224 			break;
   12225 		case PCS_LSTS_SPEED_1000:
   12226 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12227 			break;
   12228 		default:
   12229 			device_printf(sc->sc_dev, "Unknown speed\n");
   12230 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12231 			break;
   12232 		}
   12233 	}
   12234 	if ((reg & PCS_LSTS_FDX) != 0)
   12235 		ifmr->ifm_active |= IFM_FDX;
   12236 	else
   12237 		ifmr->ifm_active |= IFM_HDX;
   12238 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12239 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12240 		/* Check flow */
   12241 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12242 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12243 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12244 			goto setled;
   12245 		}
   12246 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12247 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12248 		DPRINTF(WM_DEBUG_LINK,
   12249 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12250 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12251 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12252 			mii->mii_media_active |= IFM_FLOW
   12253 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12254 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12255 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12256 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12257 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12258 			mii->mii_media_active |= IFM_FLOW
   12259 			    | IFM_ETH_TXPAUSE;
   12260 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12261 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12262 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12263 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12264 			mii->mii_media_active |= IFM_FLOW
   12265 			    | IFM_ETH_RXPAUSE;
   12266 		}
   12267 	}
   12268 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12269 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12270 setled:
   12271 	wm_tbi_serdes_set_linkled(sc);
   12272 }
   12273 
   12274 /*
   12275  * wm_serdes_tick:
   12276  *
   12277  *	Check the link on serdes devices.
   12278  */
   12279 static void
   12280 wm_serdes_tick(struct wm_softc *sc)
   12281 {
   12282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12283 	struct mii_data *mii = &sc->sc_mii;
   12284 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12285 	uint32_t reg;
   12286 
   12287 	KASSERT(WM_CORE_LOCKED(sc));
   12288 
   12289 	mii->mii_media_status = IFM_AVALID;
   12290 	mii->mii_media_active = IFM_ETHER;
   12291 
   12292 	/* Check PCS */
   12293 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12294 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12295 		mii->mii_media_status |= IFM_ACTIVE;
   12296 		sc->sc_tbi_linkup = 1;
   12297 		sc->sc_tbi_serdes_ticks = 0;
   12298 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12299 		if ((reg & PCS_LSTS_FDX) != 0)
   12300 			mii->mii_media_active |= IFM_FDX;
   12301 		else
   12302 			mii->mii_media_active |= IFM_HDX;
   12303 	} else {
   12304 		mii->mii_media_status |= IFM_NONE;
   12305 		sc->sc_tbi_linkup = 0;
   12306 		/* If the timer expired, retry autonegotiation */
   12307 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12308 		    && (++sc->sc_tbi_serdes_ticks
   12309 			>= sc->sc_tbi_serdes_anegticks)) {
   12310 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12311 			sc->sc_tbi_serdes_ticks = 0;
   12312 			/* XXX */
   12313 			wm_serdes_mediachange(ifp);
   12314 		}
   12315 	}
   12316 
   12317 	wm_tbi_serdes_set_linkled(sc);
   12318 }
   12319 
   12320 /* SFP related */
   12321 
   12322 static int
   12323 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12324 {
   12325 	uint32_t i2ccmd;
   12326 	int i;
   12327 
   12328 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12329 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12330 
   12331 	/* Poll the ready bit */
   12332 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12333 		delay(50);
   12334 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12335 		if (i2ccmd & I2CCMD_READY)
   12336 			break;
   12337 	}
   12338 	if ((i2ccmd & I2CCMD_READY) == 0)
   12339 		return -1;
   12340 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12341 		return -1;
   12342 
   12343 	*data = i2ccmd & 0x00ff;
   12344 
   12345 	return 0;
   12346 }
   12347 
   12348 static uint32_t
   12349 wm_sfp_get_media_type(struct wm_softc *sc)
   12350 {
   12351 	uint32_t ctrl_ext;
   12352 	uint8_t val = 0;
   12353 	int timeout = 3;
   12354 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12355 	int rv = -1;
   12356 
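          	/* Enable the I2C interface so the SFP module can be queried */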
   12357 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12358 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12359 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12360 	CSR_WRITE_FLUSH(sc);
   12361 
   12362 	/* Read SFP module data */
   12363 	while (timeout) {
   12364 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12365 		if (rv == 0)
   12366 			break;
   12367 		delay(100*1000); /* XXX too big */
   12368 		timeout--;
   12369 	}
   12370 	if (rv != 0)
   12371 		goto out;
   12372 	switch (val) {
   12373 	case SFF_SFP_ID_SFF:
   12374 		aprint_normal_dev(sc->sc_dev,
   12375 		    "Module/Connector soldered to board\n");
   12376 		break;
   12377 	case SFF_SFP_ID_SFP:
   12378 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12379 		break;
   12380 	case SFF_SFP_ID_UNKNOWN:
   12381 		goto out;
   12382 	default:
   12383 		break;
   12384 	}
   12385 
   12386 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    12387 	if (rv != 0)
    12388 		goto out;
   12390 
   12391 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12392 		mediatype = WM_MEDIATYPE_SERDES;
   12393 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12394 		sc->sc_flags |= WM_F_SGMII;
   12395 		mediatype = WM_MEDIATYPE_COPPER;
   12396 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12397 		sc->sc_flags |= WM_F_SGMII;
   12398 		mediatype = WM_MEDIATYPE_SERDES;
   12399 	}
   12400 
   12401 out:
   12402 	/* Restore I2C interface setting */
   12403 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12404 
   12405 	return mediatype;
   12406 }
   12407 
   12408 /*
   12409  * NVM related.
    12410  * Microwire, SPI (with or without EERD) and Flash.
   12411  */
   12412 
   12413 /* Both spi and uwire */
   12414 
   12415 /*
   12416  * wm_eeprom_sendbits:
   12417  *
   12418  *	Send a series of bits to the EEPROM.
   12419  */
   12420 static void
   12421 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12422 {
   12423 	uint32_t reg;
   12424 	int x;
   12425 
   12426 	reg = CSR_READ(sc, WMREG_EECD);
   12427 
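          	/* Clock each bit out MSB first: set DI, then pulse SK high/low */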
   12428 	for (x = nbits; x > 0; x--) {
   12429 		if (bits & (1U << (x - 1)))
   12430 			reg |= EECD_DI;
   12431 		else
   12432 			reg &= ~EECD_DI;
   12433 		CSR_WRITE(sc, WMREG_EECD, reg);
   12434 		CSR_WRITE_FLUSH(sc);
   12435 		delay(2);
   12436 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12437 		CSR_WRITE_FLUSH(sc);
   12438 		delay(2);
   12439 		CSR_WRITE(sc, WMREG_EECD, reg);
   12440 		CSR_WRITE_FLUSH(sc);
   12441 		delay(2);
   12442 	}
   12443 }
   12444 
   12445 /*
   12446  * wm_eeprom_recvbits:
   12447  *
   12448  *	Receive a series of bits from the EEPROM.
   12449  */
   12450 static void
   12451 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12452 {
   12453 	uint32_t reg, val;
   12454 	int x;
   12455 
   12456 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12457 
   12458 	val = 0;
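          	/* Clock each bit in MSB first: raise SK, sample DO, lower SK */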
   12459 	for (x = nbits; x > 0; x--) {
   12460 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12461 		CSR_WRITE_FLUSH(sc);
   12462 		delay(2);
   12463 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12464 			val |= (1U << (x - 1));
   12465 		CSR_WRITE(sc, WMREG_EECD, reg);
   12466 		CSR_WRITE_FLUSH(sc);
   12467 		delay(2);
   12468 	}
   12469 	*valp = val;
   12470 }
   12471 
   12472 /* Microwire */
   12473 
   12474 /*
   12475  * wm_nvm_read_uwire:
   12476  *
   12477  *	Read a word from the EEPROM using the MicroWire protocol.
   12478  */
   12479 static int
   12480 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12481 {
   12482 	uint32_t reg, val;
   12483 	int i;
   12484 
   12485 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12486 		device_xname(sc->sc_dev), __func__));
   12487 
   12488 	if (sc->nvm.acquire(sc) != 0)
   12489 		return -1;
   12490 
   12491 	for (i = 0; i < wordcnt; i++) {
   12492 		/* Clear SK and DI. */
   12493 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12494 		CSR_WRITE(sc, WMREG_EECD, reg);
   12495 
   12496 		/*
   12497 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12498 		 * and Xen.
   12499 		 *
   12500 		 * We use this workaround only for 82540 because qemu's
   12501 		 * e1000 act as 82540.
   12502 		 */
   12503 		if (sc->sc_type == WM_T_82540) {
   12504 			reg |= EECD_SK;
   12505 			CSR_WRITE(sc, WMREG_EECD, reg);
   12506 			reg &= ~EECD_SK;
   12507 			CSR_WRITE(sc, WMREG_EECD, reg);
   12508 			CSR_WRITE_FLUSH(sc);
   12509 			delay(2);
   12510 		}
   12511 		/* XXX: end of workaround */
   12512 
   12513 		/* Set CHIP SELECT. */
   12514 		reg |= EECD_CS;
   12515 		CSR_WRITE(sc, WMREG_EECD, reg);
   12516 		CSR_WRITE_FLUSH(sc);
   12517 		delay(2);
   12518 
   12519 		/* Shift in the READ command. */
   12520 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12521 
   12522 		/* Shift in address. */
   12523 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12524 
   12525 		/* Shift out the data. */
   12526 		wm_eeprom_recvbits(sc, &val, 16);
   12527 		data[i] = val & 0xffff;
   12528 
   12529 		/* Clear CHIP SELECT. */
   12530 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12531 		CSR_WRITE(sc, WMREG_EECD, reg);
   12532 		CSR_WRITE_FLUSH(sc);
   12533 		delay(2);
   12534 	}
   12535 
   12536 	sc->nvm.release(sc);
   12537 	return 0;
   12538 }
   12539 
   12540 /* SPI */
   12541 
   12542 /*
   12543  * Set SPI and FLASH related information from the EECD register.
   12544  * For 82541 and 82547, the word size is taken from EEPROM.
   12545  */
   12546 static int
   12547 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12548 {
   12549 	int size;
   12550 	uint32_t reg;
   12551 	uint16_t data;
   12552 
   12553 	reg = CSR_READ(sc, WMREG_EECD);
   12554 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12555 
   12556 	/* Read the size of NVM from EECD by default */
   12557 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12558 	switch (sc->sc_type) {
   12559 	case WM_T_82541:
   12560 	case WM_T_82541_2:
   12561 	case WM_T_82547:
   12562 	case WM_T_82547_2:
    12563 		/* Set a dummy word size so the EEPROM can be read */
   12564 		sc->sc_nvm_wordsize = 64;
   12565 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12566 			aprint_error_dev(sc->sc_dev,
   12567 			    "%s: failed to read EEPROM size\n", __func__);
   12568 		}
   12569 		reg = data;
   12570 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12571 		if (size == 0)
   12572 			size = 6; /* 64 word size */
   12573 		else
   12574 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12575 		break;
   12576 	case WM_T_80003:
   12577 	case WM_T_82571:
   12578 	case WM_T_82572:
   12579 	case WM_T_82573: /* SPI case */
   12580 	case WM_T_82574: /* SPI case */
   12581 	case WM_T_82583: /* SPI case */
   12582 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12583 		if (size > 14)
   12584 			size = 14;
   12585 		break;
   12586 	case WM_T_82575:
   12587 	case WM_T_82576:
   12588 	case WM_T_82580:
   12589 	case WM_T_I350:
   12590 	case WM_T_I354:
   12591 	case WM_T_I210:
   12592 	case WM_T_I211:
   12593 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12594 		if (size > 15)
   12595 			size = 15;
   12596 		break;
   12597 	default:
    12598 		aprint_error_dev(sc->sc_dev,
    12599 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    12600 		return -1;
   12602 	}
   12603 
   12604 	sc->sc_nvm_wordsize = 1 << size;
   12605 
   12606 	return 0;
   12607 }
   12608 
   12609 /*
   12610  * wm_nvm_ready_spi:
   12611  *
   12612  *	Wait for a SPI EEPROM to be ready for commands.
   12613  */
   12614 static int
   12615 wm_nvm_ready_spi(struct wm_softc *sc)
   12616 {
   12617 	uint32_t val;
   12618 	int usec;
   12619 
   12620 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12621 		device_xname(sc->sc_dev), __func__));
   12622 
   12623 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12624 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12625 		wm_eeprom_recvbits(sc, &val, 8);
   12626 		if ((val & SPI_SR_RDY) == 0)
   12627 			break;
   12628 	}
   12629 	if (usec >= SPI_MAX_RETRIES) {
    12630 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12631 		return -1;
   12632 	}
   12633 	return 0;
   12634 }
   12635 
   12636 /*
   12637  * wm_nvm_read_spi:
   12638  *
    12639  *	Read a word from the EEPROM using the SPI protocol.
   12640  */
   12641 static int
   12642 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12643 {
   12644 	uint32_t reg, val;
   12645 	int i;
   12646 	uint8_t opc;
   12647 	int rv = 0;
   12648 
   12649 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12650 		device_xname(sc->sc_dev), __func__));
   12651 
   12652 	if (sc->nvm.acquire(sc) != 0)
   12653 		return -1;
   12654 
   12655 	/* Clear SK and CS. */
   12656 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12657 	CSR_WRITE(sc, WMREG_EECD, reg);
   12658 	CSR_WRITE_FLUSH(sc);
   12659 	delay(2);
   12660 
   12661 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12662 		goto out;
   12663 
   12664 	/* Toggle CS to flush commands. */
   12665 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12666 	CSR_WRITE_FLUSH(sc);
   12667 	delay(2);
   12668 	CSR_WRITE(sc, WMREG_EECD, reg);
   12669 	CSR_WRITE_FLUSH(sc);
   12670 	delay(2);
   12671 
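          	/* On 8-bit address parts, address bit 8 is carried in the opcode */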
   12672 	opc = SPI_OPC_READ;
   12673 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12674 		opc |= SPI_OPC_A8;
   12675 
   12676 	wm_eeprom_sendbits(sc, opc, 8);
   12677 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12678 
   12679 	for (i = 0; i < wordcnt; i++) {
   12680 		wm_eeprom_recvbits(sc, &val, 16);
   12681 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12682 	}
   12683 
   12684 	/* Raise CS and clear SK. */
   12685 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12686 	CSR_WRITE(sc, WMREG_EECD, reg);
   12687 	CSR_WRITE_FLUSH(sc);
   12688 	delay(2);
   12689 
   12690 out:
   12691 	sc->nvm.release(sc);
   12692 	return rv;
   12693 }
   12694 
   12695 /* Using with EERD */
   12696 
   12697 static int
   12698 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12699 {
   12700 	uint32_t attempts = 100000;
   12701 	uint32_t i, reg = 0;
   12702 	int32_t done = -1;
   12703 
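          	/* Poll the DONE bit in 5us steps (about half a second in all) */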
   12704 	for (i = 0; i < attempts; i++) {
   12705 		reg = CSR_READ(sc, rw);
   12706 
   12707 		if (reg & EERD_DONE) {
   12708 			done = 0;
   12709 			break;
   12710 		}
   12711 		delay(5);
   12712 	}
   12713 
   12714 	return done;
   12715 }
   12716 
   12717 static int
   12718 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12719 {
   12720 	int i, eerd = 0;
   12721 	int rv = 0;
   12722 
   12723 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12724 		device_xname(sc->sc_dev), __func__));
   12725 
   12726 	if (sc->nvm.acquire(sc) != 0)
   12727 		return -1;
   12728 
   12729 	for (i = 0; i < wordcnt; i++) {
   12730 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12731 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12732 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12733 		if (rv != 0) {
   12734 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12735 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12736 			break;
   12737 		}
   12738 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12739 	}
   12740 
   12741 	sc->nvm.release(sc);
   12742 	return rv;
   12743 }
   12744 
   12745 /* Flash */
   12746 
   12747 static int
   12748 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12749 {
   12750 	uint32_t eecd;
   12751 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12752 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12753 	uint32_t nvm_dword = 0;
   12754 	uint8_t sig_byte = 0;
   12755 	int rv;
   12756 
   12757 	switch (sc->sc_type) {
   12758 	case WM_T_PCH_SPT:
   12759 	case WM_T_PCH_CNP:
   12760 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12761 		act_offset = ICH_NVM_SIG_WORD * 2;
   12762 
   12763 		/* Set bank to 0 in case flash read fails. */
   12764 		*bank = 0;
   12765 
   12766 		/* Check bank 0 */
   12767 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12768 		if (rv != 0)
   12769 			return rv;
   12770 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12771 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12772 			*bank = 0;
   12773 			return 0;
   12774 		}
   12775 
   12776 		/* Check bank 1 */
    12777 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12778 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12779 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12780 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12781 			*bank = 1;
   12782 			return 0;
   12783 		}
   12784 		aprint_error_dev(sc->sc_dev,
   12785 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12786 		return -1;
   12787 	case WM_T_ICH8:
   12788 	case WM_T_ICH9:
   12789 		eecd = CSR_READ(sc, WMREG_EECD);
   12790 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12791 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12792 			return 0;
   12793 		}
   12794 		/* FALLTHROUGH */
   12795 	default:
   12796 		/* Default to 0 */
   12797 		*bank = 0;
   12798 
   12799 		/* Check bank 0 */
   12800 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12801 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12802 			*bank = 0;
   12803 			return 0;
   12804 		}
   12805 
   12806 		/* Check bank 1 */
   12807 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12808 		    &sig_byte);
   12809 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12810 			*bank = 1;
   12811 			return 0;
   12812 		}
   12813 	}
   12814 
   12815 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12816 		device_xname(sc->sc_dev)));
   12817 	return -1;
   12818 }
   12819 
   12820 /******************************************************************************
   12821  * This function does initial flash setup so that a new read/write/erase cycle
   12822  * can be started.
   12823  *
   12824  * sc - The pointer to the hw structure
   12825  ****************************************************************************/
   12826 static int32_t
   12827 wm_ich8_cycle_init(struct wm_softc *sc)
   12828 {
   12829 	uint16_t hsfsts;
   12830 	int32_t error = 1;
   12831 	int32_t i     = 0;
   12832 
   12833 	if (sc->sc_type >= WM_T_PCH_SPT)
   12834 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12835 	else
   12836 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12837 
    12838 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12839 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12840 		return error;
   12841 
    12842 	/* Clear FCERR and DAEL in HW status by writing 1 to them */
   12844 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12845 
   12846 	if (sc->sc_type >= WM_T_PCH_SPT)
   12847 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12848 	else
   12849 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12850 
    12851 	/*
    12852 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12853 	 * check against in order to start a new cycle, or the FDONE bit
    12854 	 * should be changed in the hardware so that it is 1 after a hardware
    12855 	 * reset, which could then be used to tell whether a cycle is in
    12856 	 * progress or has been completed.  We should also have some software
    12857 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
    12858 	 * that accesses to those bits by two threads are serialized, or some
    12859 	 * way to keep two threads from starting a cycle at the same time.
    12860 	 */
   12861 
   12862 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12863 		/*
   12864 		 * There is no cycle running at present, so we can start a
   12865 		 * cycle.
   12866 		 */
   12867 
   12868 		/* Begin by setting Flash Cycle Done. */
   12869 		hsfsts |= HSFSTS_DONE;
   12870 		if (sc->sc_type >= WM_T_PCH_SPT)
   12871 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12872 			    hsfsts & 0xffffUL);
   12873 		else
   12874 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12875 		error = 0;
   12876 	} else {
   12877 		/*
   12878 		 * Otherwise, poll for some time so the current cycle has a
   12879 		 * chance to end before giving up.
   12880 		 */
   12881 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12882 			if (sc->sc_type >= WM_T_PCH_SPT)
   12883 				hsfsts = ICH8_FLASH_READ32(sc,
   12884 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12885 			else
   12886 				hsfsts = ICH8_FLASH_READ16(sc,
   12887 				    ICH_FLASH_HSFSTS);
   12888 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12889 				error = 0;
   12890 				break;
   12891 			}
   12892 			delay(1);
   12893 		}
   12894 		if (error == 0) {
   12895 			/*
   12896 			 * The previous cycle completed within the timeout,
   12897 			 * so now set the Flash Cycle Done.
   12898 			 */
   12899 			hsfsts |= HSFSTS_DONE;
   12900 			if (sc->sc_type >= WM_T_PCH_SPT)
   12901 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12902 				    hsfsts & 0xffffUL);
   12903 			else
   12904 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12905 				    hsfsts);
   12906 		}
   12907 	}
   12908 	return error;
   12909 }
   12910 
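/*
 * Illustrative sketch (not compiled): on PCH_SPT and newer, HSFSTS and
 * HSFCTL are not separate 16-bit flash registers; they are the low and
 * high halves of a single 32-bit register in LAN memory space.  That is
 * why the code above and below masks with 0xffff for HSFSTS and shifts
 * right by 16 for HSFCTL:
 */
#if 0
	uint32_t dword;
	uint16_t hsfsts, hsflctl;

	dword = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS);
	hsfsts	= (uint16_t)(dword & 0xffff);	/* Low half: HSFSTS */
	hsflctl = (uint16_t)(dword >> 16);	/* High half: HSFCTL */
#endif
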
   12911 /******************************************************************************
   12912  * This function starts a flash cycle and waits for its completion
   12913  *
   12914  * sc - The pointer to the hw structure
   12915  ****************************************************************************/
   12916 static int32_t
   12917 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12918 {
   12919 	uint16_t hsflctl;
   12920 	uint16_t hsfsts;
   12921 	int32_t error = 1;
   12922 	uint32_t i = 0;
   12923 
   12924 	/* Start a cycle by writing a 1 to Flash Cycle Go in HW Flash Control */
   12925 	if (sc->sc_type >= WM_T_PCH_SPT)
   12926 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12927 	else
   12928 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12929 	hsflctl |= HSFCTL_GO;
   12930 	if (sc->sc_type >= WM_T_PCH_SPT)
   12931 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12932 		    (uint32_t)hsflctl << 16);
   12933 	else
   12934 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12935 
   12936 	/* Wait until the FDONE bit is set to 1 */
   12937 	do {
   12938 		if (sc->sc_type >= WM_T_PCH_SPT)
   12939 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12940 			    & 0xffffUL;
   12941 		else
   12942 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12943 		if (hsfsts & HSFSTS_DONE)
   12944 			break;
   12945 		delay(1);
   12946 		i++;
   12947 	} while (i < timeout);
   12948 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12949 		error = 0;
   12950 
   12951 	return error;
   12952 }
   12953 
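/*
 * Illustrative sketch (not compiled): how the two helpers above fit
 * together in a caller; wm_read_ich8_data() below is the real thing.
 * The pre-SPT 16-bit register layout is assumed for brevity, and the
 * locals "size" (1, 2 or 4) and "flash_linear_address" are taken as
 * given.
 */
#if 0
	uint16_t hsflctl;
	uint32_t data;

	if (wm_ich8_cycle_init(sc) != 0)
		return 1;		/* Could not claim a flash cycle */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
	hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
	ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
	/* wm_ich8_flash_cycle() sets HSFCTL_GO and polls HSFSTS_DONE */
	if (wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT) == 0)
		data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
#endif
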
   12954 /******************************************************************************
   12955  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12956  *
   12957  * sc - The pointer to the hw structure
   12958  * index - The index of the byte or word to read.
   12959  * size - Size of data to read, 1=byte 2=word, 4=dword
   12960  * data - Pointer to the word to store the value read.
   12961  *****************************************************************************/
   12962 static int32_t
   12963 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12964     uint32_t size, uint32_t *data)
   12965 {
   12966 	uint16_t hsfsts;
   12967 	uint16_t hsflctl;
   12968 	uint32_t flash_linear_address;
   12969 	uint32_t flash_data = 0;
   12970 	int32_t error = 1;
   12971 	int32_t count = 0;
   12972 
   12973 	if (size < 1 || size > 4 || data == NULL ||
   12974 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12975 		return error;
   12976 
   12977 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12978 	    sc->sc_ich8_flash_base;
   12979 
   12980 	do {
   12981 		delay(1);
   12982 		/* Steps */
   12983 		error = wm_ich8_cycle_init(sc);
   12984 		if (error)
   12985 			break;
   12986 
   12987 		if (sc->sc_type >= WM_T_PCH_SPT)
   12988 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12989 			    >> 16;
   12990 		else
   12991 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12992 		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
   12993 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12994 		    & HSFCTL_BCOUNT_MASK;
   12995 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12996 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12997 			/*
   12998 			 * In SPT, This register is in Lan memory space, not
   12999 			 * In SPT, this register is in LAN memory space, not
   13000 			 * flash.  Therefore, only 32-bit access is supported.
   13001 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13002 			    (uint32_t)hsflctl << 16);
   13003 		} else
   13004 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13005 
   13006 		/*
   13007 		 * Write the last 24 bits of the index into the Flash Linear
   13008 		 * Address field of the Flash Address register.
   13009 		 */
   13010 		/* TODO: maybe check the index against the size of the flash */
   13011 
   13012 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13013 
   13014 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13015 
   13016 		/*
   13017 		 * Check if FCERR is set to 1.  If it is, clear it and retry
   13018 		 * the whole sequence a few more times; otherwise read in the
   13019 		 * Flash Data0 register.  The data comes back least
   13020 		 * significant byte first.
   13021 		 */
   13022 		if (error == 0) {
   13023 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13024 			if (size == 1)
   13025 				*data = (uint8_t)(flash_data & 0x000000FF);
   13026 			else if (size == 2)
   13027 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13028 			else if (size == 4)
   13029 				*data = (uint32_t)flash_data;
   13030 			break;
   13031 		} else {
   13032 			/*
   13033 			 * If we've gotten here, then things are probably
   13034 			 * completely hosed, but if the error condition is
   13035 			 * detected, it won't hurt to give it another try...
   13036 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13037 			 */
   13038 			if (sc->sc_type >= WM_T_PCH_SPT)
   13039 				hsfsts = ICH8_FLASH_READ32(sc,
   13040 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13041 			else
   13042 				hsfsts = ICH8_FLASH_READ16(sc,
   13043 				    ICH_FLASH_HSFSTS);
   13044 
   13045 			if (hsfsts & HSFSTS_ERR) {
   13046 				/* Repeat for some time before giving up. */
   13047 				continue;
   13048 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13049 				break;
   13050 		}
   13051 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13052 
   13053 	return error;
   13054 }
   13055 
   13056 /******************************************************************************
   13057  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13058  *
   13059  * sc - pointer to wm_hw structure
   13060  * index - The index of the byte to read.
   13061  * data - Pointer to a byte to store the value read.
   13062  *****************************************************************************/
   13063 static int32_t
   13064 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13065 {
   13066 	int32_t status;
   13067 	uint32_t word = 0;
   13068 
   13069 	status = wm_read_ich8_data(sc, index, 1, &word);
   13070 	if (status == 0)
   13071 		*data = (uint8_t)word;
   13072 	else
   13073 		*data = 0;
   13074 
   13075 	return status;
   13076 }
   13077 
   13078 /******************************************************************************
   13079  * Reads a word from the NVM using the ICH8 flash access registers.
   13080  *
   13081  * sc - pointer to wm_hw structure
   13082  * index - The starting byte index of the word to read.
   13083  * data - Pointer to a word to store the value read.
   13084  *****************************************************************************/
   13085 static int32_t
   13086 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13087 {
   13088 	int32_t status;
   13089 	uint32_t word = 0;
   13090 
   13091 	status = wm_read_ich8_data(sc, index, 2, &word);
   13092 	if (status == 0)
   13093 		*data = (uint16_t)word;
   13094 	else
   13095 		*data = 0;
   13096 
   13097 	return status;
   13098 }
   13099 
   13100 /******************************************************************************
   13101  * Reads a dword from the NVM using the ICH8 flash access registers.
   13102  *
   13103  * sc - pointer to wm_hw structure
   13104  * index - The starting byte index of the word to read.
   13105  * data - Pointer to a word to store the value read.
   13106  *****************************************************************************/
   13107 static int32_t
   13108 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13109 {
   13110 	int32_t status;
   13111 
   13112 	status = wm_read_ich8_data(sc, index, 4, data);
   13113 	return status;
   13114 }
   13115 
   13116 /******************************************************************************
   13117  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13118  * register.
   13119  *
   13120  * sc - Struct containing variables accessed by shared code
   13121  * offset - offset of word in the EEPROM to read
   13122  * data - word read from the EEPROM
   13123  * words - number of words to read
   13124  *****************************************************************************/
   13125 static int
   13126 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13127 {
   13128 	int32_t	 rv = 0;
   13129 	uint32_t flash_bank = 0;
   13130 	uint32_t act_offset = 0;
   13131 	uint32_t bank_offset = 0;
   13132 	uint16_t word = 0;
   13133 	uint16_t i = 0;
   13134 
   13135 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13136 		device_xname(sc->sc_dev), __func__));
   13137 
   13138 	if (sc->nvm.acquire(sc) != 0)
   13139 		return -1;
   13140 
   13141 	/*
   13142 	 * We need to know which flash bank is valid.  In the event
   13143 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13144 	 * managing flash_bank. So it cannot be trusted and needs
   13145 	 * to be updated with each read.
   13146 	 */
   13147 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13148 	if (rv) {
   13149 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13150 			device_xname(sc->sc_dev)));
   13151 		flash_bank = 0;
   13152 	}
   13153 
   13154 	/*
   13155 	 * Adjust the offset if we're on bank 1.  The bank size is kept in
   13156 	 * words, so double it for a byte offset.
   13157 	 */
   13158 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13159 
   13160 	for (i = 0; i < words; i++) {
   13161 		/* The NVM part needs a byte offset, hence * 2 */
   13162 		act_offset = bank_offset + ((offset + i) * 2);
   13163 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13164 		if (rv) {
   13165 			aprint_error_dev(sc->sc_dev,
   13166 			    "%s: failed to read NVM\n", __func__);
   13167 			break;
   13168 		}
   13169 		data[i] = word;
   13170 	}
   13171 
   13172 	sc->nvm.release(sc);
   13173 	return rv;
   13174 }
   13175 
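/*
 * Worked example of the offset arithmetic above, with a hypothetical
 * flash bank size of 0x1000 words and a read of word 0x10 from bank 1:
 *
 *	bank_offset = 1 * (0x1000 * 2)	  = 0x2000	(bytes)
 *	act_offset  = 0x2000 + (0x10 * 2) = 0x2020	(bytes)
 *
 * The bank size is kept in words, so both it and the word index are
 * doubled because the flash helpers take byte offsets.
 */
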
   13176 /******************************************************************************
   13177  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13178  * register.
   13179  *
   13180  * sc - Struct containing variables accessed by shared code
   13181  * offset - offset of word in the EEPROM to read
   13182  * data - word read from the EEPROM
   13183  * words - number of words to read
   13184  *****************************************************************************/
   13185 static int
   13186 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13187 {
   13188 	int32_t	 rv = 0;
   13189 	uint32_t flash_bank = 0;
   13190 	uint32_t act_offset = 0;
   13191 	uint32_t bank_offset = 0;
   13192 	uint32_t dword = 0;
   13193 	uint16_t i = 0;
   13194 
   13195 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13196 		device_xname(sc->sc_dev), __func__));
   13197 
   13198 	if (sc->nvm.acquire(sc) != 0)
   13199 		return -1;
   13200 
   13201 	/*
   13202 	 * We need to know which flash bank is valid.  In the event
   13203 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13204 	 * managing flash_bank. So it cannot be trusted and needs
   13205 	 * to be updated with each read.
   13206 	 */
   13207 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13208 	if (rv) {
   13209 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13210 			device_xname(sc->sc_dev)));
   13211 		flash_bank = 0;
   13212 	}
   13213 
   13214 	/*
   13215 	 * Adjust the offset if we're on bank 1.  The bank size is kept in
   13216 	 * words, so double it for a byte offset.
   13217 	 */
   13218 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13219 
   13220 	for (i = 0; i < words; i++) {
   13221 		/* The NVM part needs a byte offset, hence * 2 */
   13222 		act_offset = bank_offset + ((offset + i) * 2);
   13223 		/* but we must read dword aligned, so mask ... */
   13224 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13225 		if (rv) {
   13226 			aprint_error_dev(sc->sc_dev,
   13227 			    "%s: failed to read NVM\n", __func__);
   13228 			break;
   13229 		}
   13230 		/* ... and pick out low or high word */
   13231 		if ((act_offset & 0x2) == 0)
   13232 			data[i] = (uint16_t)(dword & 0xFFFF);
   13233 		else
   13234 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13235 	}
   13236 
   13237 	sc->nvm.release(sc);
   13238 	return rv;
   13239 }
   13240 
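/*
 * Worked example of the aligned-read logic above: the SPT flash
 * interface reads whole dwords, so the loop rounds the byte offset down
 * to a dword boundary and then picks the right half.  For a hypothetical
 * act_offset of 0x1022:
 *
 *	read at 0x1022 & ~0x3 = 0x1020			(aligned dword)
 *	0x1022 & 0x2 != 0 -> high word: (dword >> 16) & 0xFFFF
 *
 * An act_offset of 0x1020 would take the low word, dword & 0xFFFF.
 */
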
   13241 /* iNVM */
   13242 
   13243 static int
   13244 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13245 {
   13246 	int32_t	 rv = -1;	/* Not found unless the loop finds the word */
   13247 	uint32_t invm_dword;
   13248 	uint16_t i;
   13249 	uint8_t record_type, word_address;
   13250 
   13251 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13252 		device_xname(sc->sc_dev), __func__));
   13253 
   13254 	for (i = 0; i < INVM_SIZE; i++) {
   13255 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13256 		/* Get record type */
   13257 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13258 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13259 			break;
   13260 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13261 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13262 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13263 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13264 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13265 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13266 			if (word_address == address) {
   13267 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13268 				rv = 0;
   13269 				break;
   13270 			}
   13271 		}
   13272 	}
   13273 
   13274 	return rv;
   13275 }
   13276 
   13277 static int
   13278 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13279 {
   13280 	int rv = 0;
   13281 	int i;
   13282 
   13283 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13284 		device_xname(sc->sc_dev), __func__));
   13285 
   13286 	if (sc->nvm.acquire(sc) != 0)
   13287 		return -1;
   13288 
   13289 	for (i = 0; i < words; i++) {
   13290 		switch (offset + i) {
   13291 		case NVM_OFF_MACADDR:
   13292 		case NVM_OFF_MACADDR1:
   13293 		case NVM_OFF_MACADDR2:
   13294 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13295 			if (rv != 0) {
   13296 				data[i] = 0xffff;
   13297 				rv = -1;
   13298 			}
   13299 			break;
   13300 		case NVM_OFF_CFG2:
   13301 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13302 			if (rv != 0) {
   13303 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13304 				rv = 0;
   13305 			}
   13306 			break;
   13307 		case NVM_OFF_CFG4:
   13308 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13309 			if (rv != 0) {
   13310 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13311 				rv = 0;
   13312 			}
   13313 			break;
   13314 		case NVM_OFF_LED_1_CFG:
   13315 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13316 			if (rv != 0) {
   13317 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13318 				rv = 0;
   13319 			}
   13320 			break;
   13321 		case NVM_OFF_LED_0_2_CFG:
   13322 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13323 			if (rv != 0) {
   13324 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13325 				rv = 0;
   13326 			}
   13327 			break;
   13328 		case NVM_OFF_ID_LED_SETTINGS:
   13329 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13330 			if (rv != 0) {
   13331 				*data = ID_LED_RESERVED_FFFF;
   13332 				rv = 0;
   13333 			}
   13334 			break;
   13335 		default:
   13336 			DPRINTF(WM_DEBUG_NVM,
   13337 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13338 			*data = NVM_RESERVED_WORD;
   13339 			break;
   13340 		}
   13341 	}
   13342 
   13343 	sc->nvm.release(sc);
   13344 	return rv;
   13345 }
   13346 
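/*
 * Note on the fallbacks above: unlike a real EEPROM, the I210/I211 iNVM
 * holds only a handful of autoload records, so the switch substitutes
 * documented defaults for absent words.  For example, if
 * NVM_OFF_LED_1_CFG is not programmed, wm_nvm_read_word_invm() fails,
 * but the caller still gets NVM_LED_1_CFG_DEFAULT_I211 and rv == 0, so
 * LED setup can proceed as if the word had been present.
 */
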
   13347 /* Locking, NVM type detection, checksum validation, version and read */
   13348 
   13349 static int
   13350 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13351 {
   13352 	uint32_t eecd = 0;
   13353 
   13354 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13355 	    || sc->sc_type == WM_T_82583) {
   13356 		eecd = CSR_READ(sc, WMREG_EECD);
   13357 
   13358 		/* Isolate bits 15 & 16 */
   13359 		eecd = ((eecd >> 15) & 0x03);
   13360 
   13361 		/* If both bits are set, device is Flash type */
   13362 		if (eecd == 0x03)
   13363 			return 0;
   13364 	}
   13365 	return 1;
   13366 }
   13367 
   13368 static int
   13369 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13370 {
   13371 	uint32_t eec;
   13372 
   13373 	eec = CSR_READ(sc, WMREG_EEC);
   13374 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13375 		return 1;
   13376 
   13377 	return 0;
   13378 }
   13379 
   13380 /*
   13381  * wm_nvm_validate_checksum
   13382  *
   13383  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13384  */
   13385 static int
   13386 wm_nvm_validate_checksum(struct wm_softc *sc)
   13387 {
   13388 	uint16_t checksum;
   13389 	uint16_t eeprom_data;
   13390 #ifdef WM_DEBUG
   13391 	uint16_t csum_wordaddr, valid_checksum;
   13392 #endif
   13393 	int i;
   13394 
   13395 	checksum = 0;
   13396 
   13397 	/* Don't check for I211 */
   13398 	if (sc->sc_type == WM_T_I211)
   13399 		return 0;
   13400 
   13401 #ifdef WM_DEBUG
   13402 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13403 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13404 		csum_wordaddr = NVM_OFF_COMPAT;
   13405 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13406 	} else {
   13407 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13408 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13409 	}
   13410 
   13411 	/* Dump EEPROM image for debug */
   13412 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13413 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13414 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13415 		/* XXX PCH_SPT? */
   13416 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13417 		if ((eeprom_data & valid_checksum) == 0)
   13418 			DPRINTF(WM_DEBUG_NVM,
   13419 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13420 				device_xname(sc->sc_dev), eeprom_data,
   13421 				    valid_checksum));
   13422 	}
   13423 
   13424 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13425 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13426 		for (i = 0; i < NVM_SIZE; i++) {
   13427 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13428 				printf("XXXX ");
   13429 			else
   13430 				printf("%04hx ", eeprom_data);
   13431 			if (i % 8 == 7)
   13432 				printf("\n");
   13433 		}
   13434 	}
   13435 
   13436 #endif /* WM_DEBUG */
   13437 
   13438 	for (i = 0; i < NVM_SIZE; i++) {
   13439 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13440 			return 1;
   13441 		checksum += eeprom_data;
   13442 	}
   13443 
   13444 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13445 #ifdef WM_DEBUG
   13446 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13447 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13448 #endif
   13449 	}
   13450 
   13451 	return 0;
   13452 }
   13453 
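/*
 * Worked example of the checksum rule above: the checksum word
 * (conventionally the last of the 64 words) is chosen by the vendor so
 * that the 16-bit sum of words 0x00..0x3f equals NVM_CHECKSUM.  Assuming
 * the customary e1000 value NVM_CHECKSUM == 0xBABA (see the header for
 * the real definition), a tool writing an image computes:
 *
 *	checksum_word = 0xBABA - (sum of words 0x00..0x3e)	(mod 2^16)
 *
 * so that the loop above sums to exactly NVM_CHECKSUM.
 */
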
   13454 static void
   13455 wm_nvm_version_invm(struct wm_softc *sc)
   13456 {
   13457 	uint32_t dword;
   13458 
   13459 	/*
   13460 	 * Linux's code to decode the version is very strange, so we don't
   13461 	 * follow that algorithm and just use word 61 as the document
   13462 	 * describes.  Perhaps it's not perfect, though...
   13463 	 *
   13464 	 * Example:
   13465 	 *
   13466 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13467 	 */
   13468 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13469 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13470 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13471 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13472 }
   13473 
   13474 static void
   13475 wm_nvm_version(struct wm_softc *sc)
   13476 {
   13477 	uint16_t major, minor, build, patch;
   13478 	uint16_t uid0, uid1;
   13479 	uint16_t nvm_data;
   13480 	uint16_t off;
   13481 	bool check_version = false;
   13482 	bool check_optionrom = false;
   13483 	bool have_build = false;
   13484 	bool have_uid = true;
   13485 
   13486 	/*
   13487 	 * Version format:
   13488 	 *
   13489 	 * XYYZ
   13490 	 * X0YZ
   13491 	 * X0YY
   13492 	 *
   13493 	 * Example:
   13494 	 *
   13495 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13496 	 *	82571	0x50a6	5.10.6?
   13497 	 *	82572	0x506a	5.6.10?
   13498 	 *	82572EI	0x5069	5.6.9?
   13499 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13500 	 *		0x2013	2.1.3?
   13501 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13502 	 * ICH8+82567	0x0040	0.4.0?
   13503 	 * ICH9+82566	0x1040	1.4.0?
   13504 	 *ICH10+82567	0x0043	0.4.3?
   13505 	 *  PCH+82577	0x00c1	0.12.1?
   13506 	 * PCH2+82579	0x00d3	0.13.3?
   13507 	 *		0x00d4	0.13.4?
   13508 	 *  LPT+I218	0x0023	0.2.3?
   13509 	 *  SPT+I219	0x0084	0.8.4?
   13510 	 *  CNP+I219	0x0054	0.5.4?
   13511 	 */
   13512 
   13513 	/*
   13514 	 * XXX
   13515 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
   13516 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13517 	 */
   13518 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13519 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13520 		have_uid = false;
   13521 
   13522 	switch (sc->sc_type) {
   13523 	case WM_T_82571:
   13524 	case WM_T_82572:
   13525 	case WM_T_82574:
   13526 	case WM_T_82583:
   13527 		check_version = true;
   13528 		check_optionrom = true;
   13529 		have_build = true;
   13530 		break;
   13531 	case WM_T_ICH8:
   13532 	case WM_T_ICH9:
   13533 	case WM_T_ICH10:
   13534 	case WM_T_PCH:
   13535 	case WM_T_PCH2:
   13536 	case WM_T_PCH_LPT:
   13537 	case WM_T_PCH_SPT:
   13538 	case WM_T_PCH_CNP:
   13539 		check_version = true;
   13540 		have_build = true;
   13541 		have_uid = false;
   13542 		break;
   13543 	case WM_T_82575:
   13544 	case WM_T_82576:
   13545 	case WM_T_82580:
   13546 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13547 			check_version = true;
   13548 		break;
   13549 	case WM_T_I211:
   13550 		wm_nvm_version_invm(sc);
   13551 		have_uid = false;
   13552 		goto printver;
   13553 	case WM_T_I210:
   13554 		if (!wm_nvm_flash_presence_i210(sc)) {
   13555 			wm_nvm_version_invm(sc);
   13556 			have_uid = false;
   13557 			goto printver;
   13558 		}
   13559 		/* FALLTHROUGH */
   13560 	case WM_T_I350:
   13561 	case WM_T_I354:
   13562 		check_version = true;
   13563 		check_optionrom = true;
   13564 		break;
   13565 	default:
   13566 		return;
   13567 	}
   13568 	if (check_version
   13569 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13570 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13571 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13572 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13573 			build = nvm_data & NVM_BUILD_MASK;
   13574 			have_build = true;
   13575 		} else
   13576 			minor = nvm_data & 0x00ff;
   13577 
   13578 		/* The minor number is BCD-encoded; convert it to decimal */
   13579 		minor = (minor / 16) * 10 + (minor % 16);
   13580 		sc->sc_nvm_ver_major = major;
   13581 		sc->sc_nvm_ver_minor = minor;
   13582 
   13583 printver:
   13584 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13585 		    sc->sc_nvm_ver_minor);
   13586 		if (have_build) {
   13587 			sc->sc_nvm_ver_build = build;
   13588 			aprint_verbose(".%d", build);
   13589 		}
   13590 	}
   13591 
   13592 	/* Assume the Option ROM area is above NVM_SIZE */
   13593 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13594 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13595 		/* Option ROM Version */
   13596 		if ((off != 0x0000) && (off != 0xffff)) {
   13597 			int rv;
   13598 
   13599 			off += NVM_COMBO_VER_OFF;
   13600 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13601 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13602 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13603 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13604 				/* 16bits */
   13605 				major = uid0 >> 8;
   13606 				build = (uid0 << 8) | (uid1 >> 8);
   13607 				patch = uid1 & 0x00ff;
   13608 				aprint_verbose(", option ROM Version %d.%d.%d",
   13609 				    major, build, patch);
   13610 			}
   13611 		}
   13612 	}
   13613 
   13614 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13615 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13616 }
   13617 
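/*
 * Worked decode of the 82571 example from the table above,
 * nvm_data == 0x50a2, assuming the masks select plain nibble fields
 * (major 0xf000, minor 0x0ff0, build 0x000f -- see the header for the
 * real definitions):
 *
 *	major = 0x5
 *	minor = 0x0a -> (0x0a / 16) * 10 + (0x0a % 16) = 10	(BCD)
 *	build = 0x2
 *
 * which prints as "version 5.10.2", matching the "0x50a2 5.10.2?" guess
 * in the table.
 */
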
   13618 /*
   13619  * wm_nvm_read:
   13620  *
   13621  *	Read data from the serial EEPROM.
   13622  */
   13623 static int
   13624 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13625 {
   13626 	int rv;
   13627 
   13628 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13629 		device_xname(sc->sc_dev), __func__));
   13630 
   13631 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13632 		return -1;
   13633 
   13634 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13635 
   13636 	return rv;
   13637 }
   13638 
   13639 /*
   13640  * Hardware semaphores.
   13641  * Very complex...
   13642  */
   13643 
   13644 static int
   13645 wm_get_null(struct wm_softc *sc)
   13646 {
   13647 
   13648 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13649 		device_xname(sc->sc_dev), __func__));
   13650 	return 0;
   13651 }
   13652 
   13653 static void
   13654 wm_put_null(struct wm_softc *sc)
   13655 {
   13656 
   13657 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13658 		device_xname(sc->sc_dev), __func__));
   13659 	return;
   13660 }
   13661 
   13662 static int
   13663 wm_get_eecd(struct wm_softc *sc)
   13664 {
   13665 	uint32_t reg;
   13666 	int x;
   13667 
   13668 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13669 		device_xname(sc->sc_dev), __func__));
   13670 
   13671 	reg = CSR_READ(sc, WMREG_EECD);
   13672 
   13673 	/* Request EEPROM access. */
   13674 	reg |= EECD_EE_REQ;
   13675 	CSR_WRITE(sc, WMREG_EECD, reg);
   13676 
   13677 	/* ... and wait for it to be granted. */
   13678 	for (x = 0; x < 1000; x++) {
   13679 		reg = CSR_READ(sc, WMREG_EECD);
   13680 		if (reg & EECD_EE_GNT)
   13681 			break;
   13682 		delay(5);
   13683 	}
   13684 	if ((reg & EECD_EE_GNT) == 0) {
   13685 		aprint_error_dev(sc->sc_dev,
   13686 		    "could not acquire EEPROM GNT\n");
   13687 		reg &= ~EECD_EE_REQ;
   13688 		CSR_WRITE(sc, WMREG_EECD, reg);
   13689 		return -1;
   13690 	}
   13691 
   13692 	return 0;
   13693 }
   13694 
   13695 static void
   13696 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13697 {
   13698 
   13699 	*eecd |= EECD_SK;
   13700 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13701 	CSR_WRITE_FLUSH(sc);
   13702 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13703 		delay(1);
   13704 	else
   13705 		delay(50);
   13706 }
   13707 
   13708 static void
   13709 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13710 {
   13711 
   13712 	*eecd &= ~EECD_SK;
   13713 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13714 	CSR_WRITE_FLUSH(sc);
   13715 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13716 		delay(1);
   13717 	else
   13718 		delay(50);
   13719 }
   13720 
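/*
 * Illustrative sketch (not compiled): the two clock helpers above are
 * the building blocks of the EEPROM bit-bang protocol; the actual
 * shift-in/shift-out routines live elsewhere in this file.  Shifting a
 * single bit out on the DI line would look roughly like this, with
 * "bit" taken as given:
 */
#if 0
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	if (bit)
		eecd |= EECD_DI;
	else
		eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, &eecd);	/* Device samples DI here */
	wm_nvm_eec_clock_lower(sc, &eecd);
#endif
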
   13721 static void
   13722 wm_put_eecd(struct wm_softc *sc)
   13723 {
   13724 	uint32_t reg;
   13725 
   13726 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13727 		device_xname(sc->sc_dev), __func__));
   13728 
   13729 	/* Stop nvm */
   13730 	reg = CSR_READ(sc, WMREG_EECD);
   13731 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13732 		/* Pull CS high */
   13733 		reg |= EECD_CS;
   13734 		wm_nvm_eec_clock_lower(sc, &reg);
   13735 	} else {
   13736 		/* CS on Microwire is active-high */
   13737 		reg &= ~(EECD_CS | EECD_DI);
   13738 		CSR_WRITE(sc, WMREG_EECD, reg);
   13739 		wm_nvm_eec_clock_raise(sc, &reg);
   13740 		wm_nvm_eec_clock_lower(sc, &reg);
   13741 	}
   13742 
   13743 	reg = CSR_READ(sc, WMREG_EECD);
   13744 	reg &= ~EECD_EE_REQ;
   13745 	CSR_WRITE(sc, WMREG_EECD, reg);
   13746 
   13747 	return;
   13748 }
   13749 
   13750 /*
   13751  * Get hardware semaphore.
   13752  * Same as e1000_get_hw_semaphore_generic()
   13753  */
   13754 static int
   13755 wm_get_swsm_semaphore(struct wm_softc *sc)
   13756 {
   13757 	int32_t timeout;
   13758 	uint32_t swsm;
   13759 
   13760 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13761 		device_xname(sc->sc_dev), __func__));
   13762 	KASSERT(sc->sc_nvm_wordsize > 0);
   13763 
   13764 retry:
   13765 	/* Get the SW semaphore. */
   13766 	timeout = sc->sc_nvm_wordsize + 1;
   13767 	while (timeout) {
   13768 		swsm = CSR_READ(sc, WMREG_SWSM);
   13769 
   13770 		if ((swsm & SWSM_SMBI) == 0)
   13771 			break;
   13772 
   13773 		delay(50);
   13774 		timeout--;
   13775 	}
   13776 
   13777 	if (timeout == 0) {
   13778 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13779 			/*
   13780 			 * In rare circumstances, the SW semaphore may already
   13781 			 * be held unintentionally. Clear the semaphore once
   13782 			 * before giving up.
   13783 			 */
   13784 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13785 			wm_put_swsm_semaphore(sc);
   13786 			goto retry;
   13787 		}
   13788 		aprint_error_dev(sc->sc_dev,
   13789 		    "could not acquire SWSM SMBI\n");
   13790 		return 1;
   13791 	}
   13792 
   13793 	/* Get the FW semaphore. */
   13794 	timeout = sc->sc_nvm_wordsize + 1;
   13795 	while (timeout) {
   13796 		swsm = CSR_READ(sc, WMREG_SWSM);
   13797 		swsm |= SWSM_SWESMBI;
   13798 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13799 		/* If we managed to set the bit we got the semaphore. */
   13800 		swsm = CSR_READ(sc, WMREG_SWSM);
   13801 		if (swsm & SWSM_SWESMBI)
   13802 			break;
   13803 
   13804 		delay(50);
   13805 		timeout--;
   13806 	}
   13807 
   13808 	if (timeout == 0) {
   13809 		aprint_error_dev(sc->sc_dev,
   13810 		    "could not acquire SWSM SWESMBI\n");
   13811 		/* Release semaphores */
   13812 		wm_put_swsm_semaphore(sc);
   13813 		return 1;
   13814 	}
   13815 	return 0;
   13816 }
   13817 
   13818 /*
   13819  * Put hardware semaphore.
   13820  * Same as e1000_put_hw_semaphore_generic()
   13821  */
   13822 static void
   13823 wm_put_swsm_semaphore(struct wm_softc *sc)
   13824 {
   13825 	uint32_t swsm;
   13826 
   13827 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13828 		device_xname(sc->sc_dev), __func__));
   13829 
   13830 	swsm = CSR_READ(sc, WMREG_SWSM);
   13831 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13832 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13833 }
   13834 
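/*
 * Usage sketch (not compiled): SMBI serializes software agents and
 * SWESMBI then arbitrates between software and firmware, so accesses to
 * the shared resource are bracketed by the pair above:
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;		/* Arbitration failed or timed out */
	/* ... access the NVM or another SW/FW shared resource ... */
	wm_put_swsm_semaphore(sc);	/* Clears both SWESMBI and SMBI */
#endif
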
   13835 /*
   13836  * Get SW/FW semaphore.
   13837  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13838  */
   13839 static int
   13840 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13841 {
   13842 	uint32_t swfw_sync;
   13843 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13844 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13845 	int timeout;
   13846 
   13847 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13848 		device_xname(sc->sc_dev), __func__));
   13849 
   13850 	if (sc->sc_type == WM_T_80003)
   13851 		timeout = 50;
   13852 	else
   13853 		timeout = 200;
   13854 
   13855 	while (timeout) {
   13856 		if (wm_get_swsm_semaphore(sc)) {
   13857 			aprint_error_dev(sc->sc_dev,
   13858 			    "%s: failed to get semaphore\n",
   13859 			    __func__);
   13860 			return 1;
   13861 		}
   13862 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13863 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13864 			swfw_sync |= swmask;
   13865 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13866 			wm_put_swsm_semaphore(sc);
   13867 			return 0;
   13868 		}
   13869 		wm_put_swsm_semaphore(sc);
   13870 		delay(5000);
   13871 		timeout--;
   13872 	}
   13873 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13874 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13875 	return 1;
   13876 }
   13877 
   13878 static void
   13879 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13880 {
   13881 	uint32_t swfw_sync;
   13882 
   13883 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13884 		device_xname(sc->sc_dev), __func__));
   13885 
   13886 	while (wm_get_swsm_semaphore(sc) != 0)
   13887 		continue;
   13888 
   13889 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13890 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13891 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13892 
   13893 	wm_put_swsm_semaphore(sc);
   13894 }
   13895 
   13896 static int
   13897 wm_get_nvm_80003(struct wm_softc *sc)
   13898 {
   13899 	int rv;
   13900 
   13901 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13902 		device_xname(sc->sc_dev), __func__));
   13903 
   13904 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13905 		aprint_error_dev(sc->sc_dev,
   13906 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13907 		return rv;
   13908 	}
   13909 
   13910 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13911 	    && (rv = wm_get_eecd(sc)) != 0) {
   13912 		aprint_error_dev(sc->sc_dev,
   13913 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13914 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13915 		return rv;
   13916 	}
   13917 
   13918 	return 0;
   13919 }
   13920 
   13921 static void
   13922 wm_put_nvm_80003(struct wm_softc *sc)
   13923 {
   13924 
   13925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13926 		device_xname(sc->sc_dev), __func__));
   13927 
   13928 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13929 		wm_put_eecd(sc);
   13930 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13931 }
   13932 
   13933 static int
   13934 wm_get_nvm_82571(struct wm_softc *sc)
   13935 {
   13936 	int rv;
   13937 
   13938 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13939 		device_xname(sc->sc_dev), __func__));
   13940 
   13941 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13942 		return rv;
   13943 
   13944 	switch (sc->sc_type) {
   13945 	case WM_T_82573:
   13946 		break;
   13947 	default:
   13948 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13949 			rv = wm_get_eecd(sc);
   13950 		break;
   13951 	}
   13952 
   13953 	if (rv != 0) {
   13954 		aprint_error_dev(sc->sc_dev,
   13955 		    "%s: failed to get semaphore\n",
   13956 		    __func__);
   13957 		wm_put_swsm_semaphore(sc);
   13958 	}
   13959 
   13960 	return rv;
   13961 }
   13962 
   13963 static void
   13964 wm_put_nvm_82571(struct wm_softc *sc)
   13965 {
   13966 
   13967 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 
   13970 	switch (sc->sc_type) {
   13971 	case WM_T_82573:
   13972 		break;
   13973 	default:
   13974 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13975 			wm_put_eecd(sc);
   13976 		break;
   13977 	}
   13978 
   13979 	wm_put_swsm_semaphore(sc);
   13980 }
   13981 
   13982 static int
   13983 wm_get_phy_82575(struct wm_softc *sc)
   13984 {
   13985 
   13986 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13987 		device_xname(sc->sc_dev), __func__));
   13988 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13989 }
   13990 
   13991 static void
   13992 wm_put_phy_82575(struct wm_softc *sc)
   13993 {
   13994 
   13995 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13996 		device_xname(sc->sc_dev), __func__));
   13997 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13998 }
   13999 
   14000 static int
   14001 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14002 {
   14003 	uint32_t ext_ctrl;
   14004 	int timeout = 200;
   14005 
   14006 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14007 		device_xname(sc->sc_dev), __func__));
   14008 
   14009 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14010 	for (timeout = 0; timeout < 200; timeout++) {
   14011 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14012 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14013 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14014 
   14015 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14016 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14017 			return 0;
   14018 		delay(5000);
   14019 	}
   14020 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   14021 	    device_xname(sc->sc_dev), ext_ctrl);
   14022 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14023 	return 1;
   14024 }
   14025 
   14026 static void
   14027 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14028 {
   14029 	uint32_t ext_ctrl;
   14030 
   14031 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14032 		device_xname(sc->sc_dev), __func__));
   14033 
   14034 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14035 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14036 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14037 
   14038 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14039 }
   14040 
   14041 static int
   14042 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14043 {
   14044 	uint32_t ext_ctrl;
   14045 	int timeout;
   14046 
   14047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14048 		device_xname(sc->sc_dev), __func__));
   14049 	mutex_enter(sc->sc_ich_phymtx);
   14050 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14051 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14052 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14053 			break;
   14054 		delay(1000);
   14055 	}
   14056 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14057 		printf("%s: SW has already locked the resource\n",
   14058 		    device_xname(sc->sc_dev));
   14059 		goto out;
   14060 	}
   14061 
   14062 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14063 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14064 	for (timeout = 0; timeout < 1000; timeout++) {
   14065 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14066 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14067 			break;
   14068 		delay(1000);
   14069 	}
   14070 	if (timeout >= 1000) {
   14071 		printf("%s: failed to acquire semaphore\n",
   14072 		    device_xname(sc->sc_dev));
   14073 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14074 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14075 		goto out;
   14076 	}
   14077 	return 0;
   14078 
   14079 out:
   14080 	mutex_exit(sc->sc_ich_phymtx);
   14081 	return 1;
   14082 }
   14083 
   14084 static void
   14085 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14086 {
   14087 	uint32_t ext_ctrl;
   14088 
   14089 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14090 		device_xname(sc->sc_dev), __func__));
   14091 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14092 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14093 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14094 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14095 	} else {
   14096 		printf("%s: Semaphore unexpectedly released\n",
   14097 		    device_xname(sc->sc_dev));
   14098 	}
   14099 
   14100 	mutex_exit(sc->sc_ich_phymtx);
   14101 }
   14102 
   14103 static int
   14104 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14105 {
   14106 
   14107 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14108 		device_xname(sc->sc_dev), __func__));
   14109 	mutex_enter(sc->sc_ich_nvmmtx);
   14110 
   14111 	return 0;
   14112 }
   14113 
   14114 static void
   14115 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14116 {
   14117 
   14118 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14119 		device_xname(sc->sc_dev), __func__));
   14120 	mutex_exit(sc->sc_ich_nvmmtx);
   14121 }
   14122 
   14123 static int
   14124 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14125 {
   14126 	int i = 0;
   14127 	uint32_t reg;
   14128 
   14129 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14130 		device_xname(sc->sc_dev), __func__));
   14131 
   14132 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14133 	do {
   14134 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14135 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14136 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14137 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14138 			break;
   14139 		delay(2*1000);
   14140 		i++;
   14141 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14142 
   14143 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14144 		wm_put_hw_semaphore_82573(sc);
   14145 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14146 		    device_xname(sc->sc_dev));
   14147 		return -1;
   14148 	}
   14149 
   14150 	return 0;
   14151 }
   14152 
   14153 static void
   14154 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14155 {
   14156 	uint32_t reg;
   14157 
   14158 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14159 		device_xname(sc->sc_dev), __func__));
   14160 
   14161 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14162 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14163 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14164 }
   14165 
   14166 /*
   14167  * Management mode and power management related subroutines.
   14168  * BMC, AMT, suspend/resume and EEE.
   14169  */
   14170 
   14171 #ifdef WM_WOL
   14172 static int
   14173 wm_check_mng_mode(struct wm_softc *sc)
   14174 {
   14175 	int rv;
   14176 
   14177 	switch (sc->sc_type) {
   14178 	case WM_T_ICH8:
   14179 	case WM_T_ICH9:
   14180 	case WM_T_ICH10:
   14181 	case WM_T_PCH:
   14182 	case WM_T_PCH2:
   14183 	case WM_T_PCH_LPT:
   14184 	case WM_T_PCH_SPT:
   14185 	case WM_T_PCH_CNP:
   14186 		rv = wm_check_mng_mode_ich8lan(sc);
   14187 		break;
   14188 	case WM_T_82574:
   14189 	case WM_T_82583:
   14190 		rv = wm_check_mng_mode_82574(sc);
   14191 		break;
   14192 	case WM_T_82571:
   14193 	case WM_T_82572:
   14194 	case WM_T_82573:
   14195 	case WM_T_80003:
   14196 		rv = wm_check_mng_mode_generic(sc);
   14197 		break;
   14198 	default:
   14199 		/* Nothing to do */
   14200 		rv = 0;
   14201 		break;
   14202 	}
   14203 
   14204 	return rv;
   14205 }
   14206 
   14207 static int
   14208 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14209 {
   14210 	uint32_t fwsm;
   14211 
   14212 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14213 
   14214 	if (((fwsm & FWSM_FW_VALID) != 0)
   14215 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14216 		return 1;
   14217 
   14218 	return 0;
   14219 }
   14220 
   14221 static int
   14222 wm_check_mng_mode_82574(struct wm_softc *sc)
   14223 {
   14224 	uint16_t data;
   14225 
   14226 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14227 
   14228 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14229 		return 1;
   14230 
   14231 	return 0;
   14232 }
   14233 
   14234 static int
   14235 wm_check_mng_mode_generic(struct wm_softc *sc)
   14236 {
   14237 	uint32_t fwsm;
   14238 
   14239 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14240 
   14241 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14242 		return 1;
   14243 
   14244 	return 0;
   14245 }
   14246 #endif /* WM_WOL */
   14247 
   14248 static int
   14249 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14250 {
   14251 	uint32_t manc, fwsm, factps;
   14252 
   14253 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14254 		return 0;
   14255 
   14256 	manc = CSR_READ(sc, WMREG_MANC);
   14257 
   14258 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14259 		device_xname(sc->sc_dev), manc));
   14260 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14261 		return 0;
   14262 
   14263 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14264 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14265 		factps = CSR_READ(sc, WMREG_FACTPS);
   14266 		if (((factps & FACTPS_MNGCG) == 0)
   14267 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14268 			return 1;
   14269 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14270 		uint16_t data;
   14271 
   14272 		factps = CSR_READ(sc, WMREG_FACTPS);
   14273 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14274 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14275 			device_xname(sc->sc_dev), factps, data));
   14276 		if (((factps & FACTPS_MNGCG) == 0)
   14277 		    && ((data & NVM_CFG2_MNGM_MASK)
   14278 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14279 			return 1;
   14280 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14281 	    && ((manc & MANC_ASF_EN) == 0))
   14282 		return 1;
   14283 
   14284 	return 0;
   14285 }
   14286 
   14287 static bool
   14288 wm_phy_resetisblocked(struct wm_softc *sc)
   14289 {
   14290 	bool blocked = false;
   14291 	uint32_t reg;
   14292 	int i = 0;
   14293 
   14294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14295 		device_xname(sc->sc_dev), __func__));
   14296 
   14297 	switch (sc->sc_type) {
   14298 	case WM_T_ICH8:
   14299 	case WM_T_ICH9:
   14300 	case WM_T_ICH10:
   14301 	case WM_T_PCH:
   14302 	case WM_T_PCH2:
   14303 	case WM_T_PCH_LPT:
   14304 	case WM_T_PCH_SPT:
   14305 	case WM_T_PCH_CNP:
   14306 		do {
   14307 			reg = CSR_READ(sc, WMREG_FWSM);
   14308 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14309 				blocked = true;
   14310 				delay(10*1000);
   14311 				continue;
   14312 			}
   14313 			blocked = false;
   14314 		} while (blocked && (i++ < 30));
   14315 		return blocked;
   14316 		break;
   14317 	case WM_T_82571:
   14318 	case WM_T_82572:
   14319 	case WM_T_82573:
   14320 	case WM_T_82574:
   14321 	case WM_T_82583:
   14322 	case WM_T_80003:
   14323 		reg = CSR_READ(sc, WMREG_MANC);
   14324 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14325 			return true;
   14326 		else
   14327 			return false;
   14328 		break;
   14329 	default:
   14330 		/* No problem */
   14331 		break;
   14332 	}
   14333 
   14334 	return false;
   14335 }
   14336 
   14337 static void
   14338 wm_get_hw_control(struct wm_softc *sc)
   14339 {
   14340 	uint32_t reg;
   14341 
   14342 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14343 		device_xname(sc->sc_dev), __func__));
   14344 
   14345 	if (sc->sc_type == WM_T_82573) {
   14346 		reg = CSR_READ(sc, WMREG_SWSM);
   14347 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14348 	} else if (sc->sc_type >= WM_T_82571) {
   14349 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14351 	}
   14352 }
   14353 
   14354 static void
   14355 wm_release_hw_control(struct wm_softc *sc)
   14356 {
   14357 	uint32_t reg;
   14358 
   14359 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14360 		device_xname(sc->sc_dev), __func__));
   14361 
   14362 	if (sc->sc_type == WM_T_82573) {
   14363 		reg = CSR_READ(sc, WMREG_SWSM);
   14364 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14365 	} else if (sc->sc_type >= WM_T_82571) {
   14366 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14367 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14368 	}
   14369 }
   14370 
   14371 static void
   14372 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14373 {
   14374 	uint32_t reg;
   14375 
   14376 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14377 		device_xname(sc->sc_dev), __func__));
   14378 
   14379 	if (sc->sc_type < WM_T_PCH2)
   14380 		return;
   14381 
   14382 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14383 
   14384 	if (gate)
   14385 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14386 	else
   14387 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14388 
   14389 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14390 }
   14391 
   14392 static int
   14393 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14394 {
   14395 	uint32_t fwsm, reg;
   14396 	int rv = 0;
   14397 
   14398 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14399 		device_xname(sc->sc_dev), __func__));
   14400 
   14401 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14402 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14403 
   14404 	/* Disable ULP */
   14405 	wm_ulp_disable(sc);
   14406 
   14407 	/* Acquire PHY semaphore */
   14408 	rv = sc->phy.acquire(sc);
   14409 	if (rv != 0) {
   14410 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14411 		device_xname(sc->sc_dev), __func__));
   14412 		return -1;
   14413 	}
   14414 
   14415 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14416 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14417 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14418 	 */
   14419 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14420 	switch (sc->sc_type) {
   14421 	case WM_T_PCH_LPT:
   14422 	case WM_T_PCH_SPT:
   14423 	case WM_T_PCH_CNP:
   14424 		if (wm_phy_is_accessible_pchlan(sc))
   14425 			break;
   14426 
   14427 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14428 		 * forcing MAC to SMBus mode first.
   14429 		 */
   14430 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14431 		reg |= CTRL_EXT_FORCE_SMBUS;
   14432 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14433 #if 0
   14434 		/* XXX Isn't this required??? */
   14435 		CSR_WRITE_FLUSH(sc);
   14436 #endif
   14437 		/* Wait 50 milliseconds for MAC to finish any retries
   14438 		 * that it might be trying to perform from previous
   14439 		 * attempts to acknowledge any phy read requests.
   14440 		 */
   14441 		delay(50 * 1000);
   14442 		/* FALLTHROUGH */
   14443 	case WM_T_PCH2:
   14444 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14445 			break;
   14446 		/* FALLTHROUGH */
   14447 	case WM_T_PCH:
   14448 		if (sc->sc_type == WM_T_PCH)
   14449 			if ((fwsm & FWSM_FW_VALID) != 0)
   14450 				break;
   14451 
   14452 		if (wm_phy_resetisblocked(sc) == true) {
   14453 			printf("XXX reset is blocked(3)\n");
   14454 			break;
   14455 		}
   14456 
   14457 		/* Toggle LANPHYPC Value bit */
   14458 		wm_toggle_lanphypc_pch_lpt(sc);
   14459 
   14460 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14461 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14462 				break;
   14463 
   14464 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14465 			 * so ensure that the MAC is also out of SMBus mode
   14466 			 */
   14467 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14468 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14469 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14470 
   14471 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14472 				break;
   14473 			rv = -1;
   14474 		}
   14475 		break;
   14476 	default:
   14477 		break;
   14478 	}
   14479 
   14480 	/* Release semaphore */
   14481 	sc->phy.release(sc);
   14482 
   14483 	if (rv == 0) {
   14484 		/* Check to see if able to reset PHY.  Print error if not */
   14485 		if (wm_phy_resetisblocked(sc)) {
   14486 			printf("XXX reset is blocked(4)\n");
   14487 			goto out;
   14488 		}
   14489 
   14490 		/* Reset the PHY before any access to it.  Doing so, ensures
   14491 		 * that the PHY is in a known good state before we read/write
   14492 		 * PHY registers.  The generic reset is sufficient here,
   14493 		 * because we haven't determined the PHY type yet.
   14494 		 */
   14495 		if (wm_reset_phy(sc) != 0)
   14496 			goto out;
   14497 
   14498 		/* On a successful reset, possibly need to wait for the PHY
   14499 		 * to quiesce to an accessible state before returning control
   14500 		 * to the calling function.  If the PHY does not quiesce, then
   14501 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   14502 		 * the PHY is in.
   14503 		 */
   14504 		if (wm_phy_resetisblocked(sc))
   14505 			printf("XXX reset is blocked(4)\n");
   14506 	}
   14507 
   14508 out:
   14509 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14510 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14511 		delay(10*1000);
   14512 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14513 	}
   14514 
   14515 	return rv;
   14516 }
   14517 
   14518 static void
   14519 wm_init_manageability(struct wm_softc *sc)
   14520 {
   14521 
   14522 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14523 		device_xname(sc->sc_dev), __func__));
   14524 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14525 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14526 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14527 
   14528 		/* Disable hardware interception of ARP */
   14529 		manc &= ~MANC_ARP_EN;
   14530 
   14531 		/* Enable receiving management packets to the host */
   14532 		if (sc->sc_type >= WM_T_82571) {
   14533 			manc |= MANC_EN_MNG2HOST;
   14534 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14535 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14536 		}
   14537 
   14538 		CSR_WRITE(sc, WMREG_MANC, manc);
   14539 	}
   14540 }
   14541 
   14542 static void
   14543 wm_release_manageability(struct wm_softc *sc)
   14544 {
   14545 
   14546 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14547 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14548 
   14549 		manc |= MANC_ARP_EN;
   14550 		if (sc->sc_type >= WM_T_82571)
   14551 			manc &= ~MANC_EN_MNG2HOST;
   14552 
   14553 		CSR_WRITE(sc, WMREG_MANC, manc);
   14554 	}
   14555 }
   14556 
   14557 static void
   14558 wm_get_wakeup(struct wm_softc *sc)
   14559 {
   14560 
   14561 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14562 	switch (sc->sc_type) {
   14563 	case WM_T_82573:
   14564 	case WM_T_82583:
   14565 		sc->sc_flags |= WM_F_HAS_AMT;
   14566 		/* FALLTHROUGH */
   14567 	case WM_T_80003:
   14568 	case WM_T_82575:
   14569 	case WM_T_82576:
   14570 	case WM_T_82580:
   14571 	case WM_T_I350:
   14572 	case WM_T_I354:
   14573 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14574 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14575 		/* FALLTHROUGH */
   14576 	case WM_T_82541:
   14577 	case WM_T_82541_2:
   14578 	case WM_T_82547:
   14579 	case WM_T_82547_2:
   14580 	case WM_T_82571:
   14581 	case WM_T_82572:
   14582 	case WM_T_82574:
   14583 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14584 		break;
   14585 	case WM_T_ICH8:
   14586 	case WM_T_ICH9:
   14587 	case WM_T_ICH10:
   14588 	case WM_T_PCH:
   14589 	case WM_T_PCH2:
   14590 	case WM_T_PCH_LPT:
   14591 	case WM_T_PCH_SPT:
   14592 	case WM_T_PCH_CNP:
   14593 		sc->sc_flags |= WM_F_HAS_AMT;
   14594 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14595 		break;
   14596 	default:
   14597 		break;
   14598 	}
   14599 
   14600 	/* 1: HAS_MANAGE */
   14601 	if (wm_enable_mng_pass_thru(sc) != 0)
   14602 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14603 
   14604 	/*
   14605 	 * Note that the WOL flags are set after the EEPROM stuff is
   14606 	 * reset.
   14607 	 */
   14608 }
   14609 
   14610 /*
   14611  * Unconfigure Ultra Low Power mode.
   14612  * Only for I217 and newer (see below).
   14613  */
   14614 static int
   14615 wm_ulp_disable(struct wm_softc *sc)
   14616 {
   14617 	uint32_t reg;
   14618 	uint16_t phyreg;
   14619 	int i = 0, rv = 0;
   14620 
   14621 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14622 		device_xname(sc->sc_dev), __func__));
   14623 	/* Exclude old devices */
   14624 	if ((sc->sc_type < WM_T_PCH_LPT)
   14625 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14626 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14627 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14628 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14629 		return 0;
   14630 
   14631 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14632 		/* Request ME un-configure ULP mode in the PHY */
   14633 		reg = CSR_READ(sc, WMREG_H2ME);
   14634 		reg &= ~H2ME_ULP;
   14635 		reg |= H2ME_ENFORCE_SETTINGS;
   14636 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14637 
   14638 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14639 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14640 			if (i++ == 30) {
   14641 				printf("%s timed out\n", __func__);
   14642 				return -1;
   14643 			}
   14644 			delay(10 * 1000);
   14645 		}
   14646 		reg = CSR_READ(sc, WMREG_H2ME);
   14647 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14648 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14649 
   14650 		return 0;
   14651 	}
   14652 
   14653 	/* Acquire semaphore */
   14654 	rv = sc->phy.acquire(sc);
   14655 	if (rv != 0) {
   14656 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14657 		device_xname(sc->sc_dev), __func__));
   14658 		return -1;
   14659 	}
   14660 
   14661 	/* Toggle LANPHYPC */
   14662 	wm_toggle_lanphypc_pch_lpt(sc);
   14663 
   14664 	/* Unforce SMBus mode in PHY */
   14665 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14666 	if (rv != 0) {
   14667 		uint32_t reg2;
   14668 
   14669 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14670 			__func__);
   14671 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14672 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14673 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14674 		delay(50 * 1000);
   14675 
   14676 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14677 		    &phyreg);
   14678 		if (rv != 0)
   14679 			goto release;
   14680 	}
   14681 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14682 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14683 
   14684 	/* Unforce SMBus mode in MAC */
   14685 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14686 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14687 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14688 
   14689 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14690 	if (rv != 0)
   14691 		goto release;
   14692 	phyreg |= HV_PM_CTRL_K1_ENA;
   14693 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14694 
   14695 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14696 		&phyreg);
   14697 	if (rv != 0)
   14698 		goto release;
   14699 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14700 	    | I218_ULP_CONFIG1_STICKY_ULP
   14701 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14702 	    | I218_ULP_CONFIG1_WOL_HOST
   14703 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14704 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14705 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14706 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14707 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14708 	phyreg |= I218_ULP_CONFIG1_START;
   14709 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14710 
   14711 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14712 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14713 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14714 
   14715 release:
   14716 	/* Release semaphore */
   14717 	sc->phy.release(sc);
   14718 	wm_gmii_reset(sc);
   14719 	delay(50 * 1000);
   14720 
   14721 	return rv;
   14722 }
   14723 
   14724 /* WOL in the newer chipset interfaces (pchlan) */
   14725 static int
   14726 wm_enable_phy_wakeup(struct wm_softc *sc)
   14727 {
   14728 	device_t dev = sc->sc_dev;
   14729 	uint32_t mreg, moff;
   14730 	uint16_t wuce, wuc, wufc, preg;
   14731 	int i, rv;
   14732 
   14733 	KASSERT(sc->sc_type >= WM_T_PCH);
   14734 
   14735 	/* Copy MAC RARs to PHY RARs */
   14736 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14737 
   14738 	/* Activate PHY wakeup */
   14739 	rv = sc->phy.acquire(sc);
   14740 	if (rv != 0) {
   14741 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14742 		    __func__);
   14743 		return rv;
   14744 	}
   14745 
   14746 	/*
   14747 	 * Enable access to PHY wakeup registers.
   14748 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14749 	 */
   14750 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14751 	if (rv != 0) {
   14752 		device_printf(dev,
   14753 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14754 		goto release;
   14755 	}
   14756 
   14757 	/* Copy MAC MTA to PHY MTA */
   14758 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14759 		uint16_t lo, hi;
   14760 
   14761 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14762 		lo = (uint16_t)(mreg & 0xffff);
   14763 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14764 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14765 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14766 	}
   14767 
   14768 	/* Configure PHY Rx Control register */
   14769 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14770 	mreg = CSR_READ(sc, WMREG_RCTL);
   14771 	if (mreg & RCTL_UPE)
   14772 		preg |= BM_RCTL_UPE;
   14773 	if (mreg & RCTL_MPE)
   14774 		preg |= BM_RCTL_MPE;
   14775 	preg &= ~(BM_RCTL_MO_MASK);
   14776 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14777 	if (moff != 0)
   14778 		preg |= moff << BM_RCTL_MO_SHIFT;
   14779 	if (mreg & RCTL_BAM)
   14780 		preg |= BM_RCTL_BAM;
   14781 	if (mreg & RCTL_PMCF)
   14782 		preg |= BM_RCTL_PMCF;
   14783 	mreg = CSR_READ(sc, WMREG_CTRL);
   14784 	if (mreg & CTRL_RFCE)
   14785 		preg |= BM_RCTL_RFCE;
   14786 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14787 
   14788 	wuc = WUC_APME | WUC_PME_EN;
   14789 	wufc = WUFC_MAG;
   14790 	/* Enable PHY wakeup in MAC register */
   14791 	CSR_WRITE(sc, WMREG_WUC,
   14792 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14793 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14794 
   14795 	/* Configure and enable PHY wakeup in PHY registers */
   14796 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14797 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14798 
   14799 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14800 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14801 
   14802 release:
   14803 	sc->phy.release(sc);
   14804 
    14805 	return rv;
   14806 }
   14807 
   14808 /* Power down workaround on D3 */
   14809 static void
   14810 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14811 {
   14812 	uint32_t reg;
   14813 	uint16_t phyreg;
   14814 	int i;
   14815 
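          	/*
          	 * Try at most twice: if the VR power-down mode does not
          	 * latch on the first pass, reset the PHY (bottom of the
          	 * loop) and repeat once.
          	 */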
   14816 	for (i = 0; i < 2; i++) {
   14817 		/* Disable link */
   14818 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14819 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14820 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14821 
   14822 		/*
   14823 		 * Call gig speed drop workaround on Gig disable before
   14824 		 * accessing any PHY registers
   14825 		 */
   14826 		if (sc->sc_type == WM_T_ICH8)
   14827 			wm_gig_downshift_workaround_ich8lan(sc);
   14828 
   14829 		/* Write VR power-down enable */
   14830 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14831 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14832 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14833 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14834 
   14835 		/* Read it back and test */
   14836 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14837 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14838 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14839 			break;
   14840 
   14841 		/* Issue PHY reset and repeat at most one more time */
   14842 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14843 	}
   14844 }
   14845 
   14846 /*
   14847  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14848  *  @sc: pointer to the HW structure
   14849  *
   14850  *  During S0 to Sx transition, it is possible the link remains at gig
   14851  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14852  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14853  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14854  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14855  *  needs to be written.
    14856  *  Parts that support (and are linked to a partner which supports) EEE in
   14857  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14858  *  than 10Mbps w/o EEE.
   14859  */
   14860 static void
   14861 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14862 {
   14863 	device_t dev = sc->sc_dev;
   14864 	struct ethercom *ec = &sc->sc_ethercom;
   14865 	uint32_t phy_ctrl;
   14866 	int rv;
   14867 
   14868 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14869 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14870 
   14871 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14872 
   14873 	if (sc->sc_phytype == WMPHY_I217) {
   14874 		uint16_t devid = sc->sc_pcidevid;
   14875 
   14876 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14877 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14878 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14879 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14880 		    (sc->sc_type >= WM_T_PCH_SPT))
   14881 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14882 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14883 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14884 
   14885 		if (sc->phy.acquire(sc) != 0)
   14886 			goto out;
   14887 
   14888 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14889 			uint16_t eee_advert;
   14890 
   14891 			rv = wm_read_emi_reg_locked(dev,
   14892 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14893 			if (rv)
   14894 				goto release;
   14895 
   14896 			/*
   14897 			 * Disable LPLU if both link partners support 100BaseT
   14898 			 * EEE and 100Full is advertised on both ends of the
   14899 			 * link, and enable Auto Enable LPI since there will
   14900 			 * be no driver to enable LPI while in Sx.
   14901 			 */
   14902 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14903 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14904 				uint16_t anar, phy_reg;
   14905 
   14906 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14907 				    &anar);
   14908 				if (anar & ANAR_TX_FD) {
   14909 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14910 					    PHY_CTRL_NOND0A_LPLU);
   14911 
   14912 					/* Set Auto Enable LPI after link up */
   14913 					sc->phy.readreg_locked(dev, 2,
   14914 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14915 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14916 					sc->phy.writereg_locked(dev, 2,
   14917 					    I217_LPI_GPIO_CTRL, phy_reg);
   14918 				}
   14919 			}
   14920 		}
   14921 
   14922 		/*
   14923 		 * For i217 Intel Rapid Start Technology support,
   14924 		 * when the system is going into Sx and no manageability engine
   14925 		 * is present, the driver must configure proxy to reset only on
   14926 		 * power good.	LPI (Low Power Idle) state must also reset only
   14927 		 * on power good, as well as the MTA (Multicast table array).
   14928 		 * The SMBus release must also be disabled on LCD reset.
   14929 		 */
   14930 
   14931 		/*
   14932 		 * Enable MTA to reset for Intel Rapid Start Technology
   14933 		 * Support
   14934 		 */
   14935 
   14936 release:
   14937 		sc->phy.release(sc);
   14938 	}
   14939 out:
   14940 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14941 
   14942 	if (sc->sc_type == WM_T_ICH8)
   14943 		wm_gig_downshift_workaround_ich8lan(sc);
   14944 
   14945 	if (sc->sc_type >= WM_T_PCH) {
   14946 		wm_oem_bits_config_ich8lan(sc, false);
   14947 
   14948 		/* Reset PHY to activate OEM bits on 82577/8 */
   14949 		if (sc->sc_type == WM_T_PCH)
   14950 			wm_reset_phy(sc);
   14951 
   14952 		if (sc->phy.acquire(sc) != 0)
   14953 			return;
   14954 		wm_write_smbus_addr(sc);
   14955 		sc->phy.release(sc);
   14956 	}
   14957 }
   14958 
   14959 /*
   14960  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14961  *  @sc: pointer to the HW structure
   14962  *
   14963  *  During Sx to S0 transitions on non-managed devices or managed devices
   14964  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14965  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14966  *  the PHY.
   14967  *  On i217, setup Intel Rapid Start Technology.
   14968  */
   14969 static int
   14970 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14971 {
   14972 	device_t dev = sc->sc_dev;
   14973 	int rv;
   14974 
   14975 	if (sc->sc_type < WM_T_PCH2)
   14976 		return 0;
   14977 
   14978 	rv = wm_init_phy_workarounds_pchlan(sc);
   14979 	if (rv != 0)
   14980 		return -1;
   14981 
   14982 	/* For i217 Intel Rapid Start Technology support when the system
   14983 	 * is transitioning from Sx and no manageability engine is present
   14984 	 * configure SMBus to restore on reset, disable proxy, and enable
   14985 	 * the reset on MTA (Multicast table array).
   14986 	 */
   14987 	if (sc->sc_phytype == WMPHY_I217) {
   14988 		uint16_t phy_reg;
   14989 
   14990 		if (sc->phy.acquire(sc) != 0)
   14991 			return -1;
   14992 
   14993 		/* Clear Auto Enable LPI after link up */
   14994 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14995 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14996 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14997 
   14998 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14999 			/* Restore clear on SMB if no manageability engine
   15000 			 * is present
   15001 			 */
   15002 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15003 			    &phy_reg);
   15004 			if (rv != 0)
   15005 				goto release;
   15006 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15007 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15008 
   15009 			/* Disable Proxy */
   15010 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15011 		}
   15012 		/* Enable reset on MTA */
    15013 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15014 		if (rv != 0)
   15015 			goto release;
   15016 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15017 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15018 
   15019 release:
   15020 		sc->phy.release(sc);
   15021 		return rv;
   15022 	}
   15023 
   15024 	return 0;
   15025 }
   15026 
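          /*
           * Arm the device for wakeup before suspend: advertise the wakeup
           * capability, apply the ICH/PCH suspend workarounds, enable PHY-
           * or MAC-based wakeup filters, and finally request PME through
           * the PCI power management registers.
           */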
   15027 static void
   15028 wm_enable_wakeup(struct wm_softc *sc)
   15029 {
   15030 	uint32_t reg, pmreg;
   15031 	pcireg_t pmode;
   15032 	int rv = 0;
   15033 
   15034 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15035 		device_xname(sc->sc_dev), __func__));
   15036 
   15037 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15038 	    &pmreg, NULL) == 0)
   15039 		return;
   15040 
   15041 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15042 		goto pme;
   15043 
   15044 	/* Advertise the wakeup capability */
   15045 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15046 	    | CTRL_SWDPIN(3));
   15047 
   15048 	/* Keep the laser running on fiber adapters */
   15049 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15050 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15051 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15052 		reg |= CTRL_EXT_SWDPIN(3);
   15053 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15054 	}
   15055 
   15056 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15057 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15058 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15059 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15060 		wm_suspend_workarounds_ich8lan(sc);
   15061 
   15062 #if 0	/* For the multicast packet */
   15063 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15064 	reg |= WUFC_MC;
   15065 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15066 #endif
   15067 
   15068 	if (sc->sc_type >= WM_T_PCH) {
   15069 		rv = wm_enable_phy_wakeup(sc);
   15070 		if (rv != 0)
   15071 			goto pme;
   15072 	} else {
   15073 		/* Enable wakeup by the MAC */
   15074 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15075 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15076 	}
   15077 
   15078 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15079 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15080 		|| (sc->sc_type == WM_T_PCH2))
   15081 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15082 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15083 
   15084 pme:
   15085 	/* Request PME */
   15086 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15087 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15088 		/* For WOL */
   15089 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15090 	} else {
   15091 		/* Disable WOL */
   15092 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15093 	}
   15094 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15095 }
   15096 
    15097 /* Disable ASPM L0s and/or L1 as an errata workaround */
   15098 static void
   15099 wm_disable_aspm(struct wm_softc *sc)
   15100 {
   15101 	pcireg_t reg, mask = 0;
    15102 	const char *str = "";
   15103 
    15104 	/*
    15105 	 * Only for PCIe devices which have the PCIe capability in the
    15106 	 * PCI config space.
    15107 	 */
   15108 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15109 		return;
   15110 
   15111 	switch (sc->sc_type) {
   15112 	case WM_T_82571:
   15113 	case WM_T_82572:
   15114 		/*
   15115 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15116 		 * State Power management L1 State (ASPM L1).
   15117 		 */
   15118 		mask = PCIE_LCSR_ASPM_L1;
   15119 		str = "L1 is";
   15120 		break;
   15121 	case WM_T_82573:
   15122 	case WM_T_82574:
   15123 	case WM_T_82583:
   15124 		/*
   15125 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15126 		 *
    15127 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15128 		 * some chipsets.  The documents for the 82574 and 82583 say
    15129 		 * that disabling L0s with specific chipsets is sufficient,
    15130 		 * but we follow what the Intel em driver does.
   15131 		 *
   15132 		 * References:
   15133 		 * Errata 8 of the Specification Update of i82573.
   15134 		 * Errata 20 of the Specification Update of i82574.
   15135 		 * Errata 9 of the Specification Update of i82583.
   15136 		 */
   15137 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15138 		str = "L0s and L1 are";
   15139 		break;
   15140 	default:
   15141 		return;
   15142 	}
   15143 
   15144 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15145 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15146 	reg &= ~mask;
   15147 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15148 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15149 
   15150 	/* Print only in wm_attach() */
   15151 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15152 		aprint_verbose_dev(sc->sc_dev,
   15153 		    "ASPM %s disabled to workaround the errata.\n", str);
   15154 }
   15155 
   15156 /* LPLU */
   15157 
   15158 static void
   15159 wm_lplu_d0_disable(struct wm_softc *sc)
   15160 {
   15161 	struct mii_data *mii = &sc->sc_mii;
   15162 	uint32_t reg;
   15163 	uint16_t phyval;
   15164 
   15165 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15166 		device_xname(sc->sc_dev), __func__));
   15167 
   15168 	if (sc->sc_phytype == WMPHY_IFE)
   15169 		return;
   15170 
   15171 	switch (sc->sc_type) {
   15172 	case WM_T_82571:
   15173 	case WM_T_82572:
   15174 	case WM_T_82573:
   15175 	case WM_T_82575:
   15176 	case WM_T_82576:
   15177 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15178 		phyval &= ~PMR_D0_LPLU;
   15179 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15180 		break;
   15181 	case WM_T_82580:
   15182 	case WM_T_I350:
   15183 	case WM_T_I210:
   15184 	case WM_T_I211:
   15185 		reg = CSR_READ(sc, WMREG_PHPM);
   15186 		reg &= ~PHPM_D0A_LPLU;
   15187 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15188 		break;
   15189 	case WM_T_82574:
   15190 	case WM_T_82583:
   15191 	case WM_T_ICH8:
   15192 	case WM_T_ICH9:
   15193 	case WM_T_ICH10:
   15194 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15195 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15196 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15197 		CSR_WRITE_FLUSH(sc);
   15198 		break;
   15199 	case WM_T_PCH:
   15200 	case WM_T_PCH2:
   15201 	case WM_T_PCH_LPT:
   15202 	case WM_T_PCH_SPT:
   15203 	case WM_T_PCH_CNP:
   15204 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15205 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15206 		if (wm_phy_resetisblocked(sc) == false)
   15207 			phyval |= HV_OEM_BITS_ANEGNOW;
   15208 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15209 		break;
   15210 	default:
   15211 		break;
   15212 	}
   15213 }
   15214 
   15215 /* EEE */
   15216 
   15217 static int
   15218 wm_set_eee_i350(struct wm_softc *sc)
   15219 {
   15220 	struct ethercom *ec = &sc->sc_ethercom;
   15221 	uint32_t ipcnfg, eeer;
   15222 	uint32_t ipcnfg_mask
   15223 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15224 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15225 
   15226 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15227 
   15228 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15229 	eeer = CSR_READ(sc, WMREG_EEER);
   15230 
   15231 	/* Enable or disable per user setting */
   15232 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15233 		ipcnfg |= ipcnfg_mask;
   15234 		eeer |= eeer_mask;
   15235 	} else {
   15236 		ipcnfg &= ~ipcnfg_mask;
   15237 		eeer &= ~eeer_mask;
   15238 	}
   15239 
   15240 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15241 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15242 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15243 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15244 
   15245 	return 0;
   15246 }
   15247 
   15248 static int
   15249 wm_set_eee_pchlan(struct wm_softc *sc)
   15250 {
   15251 	device_t dev = sc->sc_dev;
   15252 	struct ethercom *ec = &sc->sc_ethercom;
   15253 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15254 	int rv = 0;
   15255 
   15256 	switch (sc->sc_phytype) {
   15257 	case WMPHY_82579:
   15258 		lpa = I82579_EEE_LP_ABILITY;
   15259 		pcs_status = I82579_EEE_PCS_STATUS;
   15260 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15261 		break;
   15262 	case WMPHY_I217:
   15263 		lpa = I217_EEE_LP_ABILITY;
   15264 		pcs_status = I217_EEE_PCS_STATUS;
   15265 		adv_addr = I217_EEE_ADVERTISEMENT;
   15266 		break;
   15267 	default:
   15268 		return 0;
   15269 	}
   15270 
   15271 	if (sc->phy.acquire(sc)) {
   15272 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15273 		return 0;
   15274 	}
   15275 
   15276 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15277 	if (rv != 0)
   15278 		goto release;
   15279 
   15280 	/* Clear bits that enable EEE in various speeds */
   15281 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15282 
   15283 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15284 		/* Save off link partner's EEE ability */
   15285 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15286 		if (rv != 0)
   15287 			goto release;
   15288 
   15289 		/* Read EEE advertisement */
   15290 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15291 			goto release;
   15292 
   15293 		/*
   15294 		 * Enable EEE only for speeds in which the link partner is
   15295 		 * EEE capable and for which we advertise EEE.
   15296 		 */
   15297 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15298 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15299 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15300 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15301 			if ((data & ANLPAR_TX_FD) != 0)
   15302 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15303 			else {
   15304 				/*
   15305 				 * EEE is not supported in 100Half, so ignore
   15306 				 * partner's EEE in 100 ability if full-duplex
   15307 				 * is not advertised.
   15308 				 */
   15309 				sc->eee_lp_ability
   15310 				    &= ~AN_EEEADVERT_100_TX;
   15311 			}
   15312 		}
   15313 	}
   15314 
   15315 	if (sc->sc_phytype == WMPHY_82579) {
   15316 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15317 		if (rv != 0)
   15318 			goto release;
   15319 
   15320 		data &= ~I82579_LPI_PLL_SHUT_100;
   15321 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15322 	}
   15323 
   15324 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15325 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15326 		goto release;
   15327 
   15328 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15329 release:
   15330 	sc->phy.release(sc);
   15331 
   15332 	return rv;
   15333 }
   15334 
   15335 static int
   15336 wm_set_eee(struct wm_softc *sc)
   15337 {
   15338 	struct ethercom *ec = &sc->sc_ethercom;
   15339 
   15340 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15341 		return 0;
   15342 
   15343 	if (sc->sc_type == WM_T_I354) {
   15344 		/* I354 uses an external PHY */
   15345 		return 0; /* not yet */
   15346 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15347 		return wm_set_eee_i350(sc);
   15348 	else if (sc->sc_type >= WM_T_PCH2)
   15349 		return wm_set_eee_pchlan(sc);
   15350 
   15351 	return 0;
   15352 }
   15353 
   15354 /*
   15355  * Workarounds (mainly PHY related).
    15356  * Basically, PHY workarounds are in the PHY drivers.
   15357  */
   15358 
   15359 /* Work-around for 82566 Kumeran PCS lock loss */
   15360 static int
   15361 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15362 {
   15363 	struct mii_data *mii = &sc->sc_mii;
   15364 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15365 	int i, reg, rv;
   15366 	uint16_t phyreg;
   15367 
   15368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15369 		device_xname(sc->sc_dev), __func__));
   15370 
   15371 	/* If the link is not up, do nothing */
   15372 	if ((status & STATUS_LU) == 0)
   15373 		return 0;
   15374 
    15375 	/* Nothing to do if the link speed is other than 1Gbps */
   15376 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15377 		return 0;
   15378 
   15379 	for (i = 0; i < 10; i++) {
   15380 		/* read twice */
   15381 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15382 		if (rv != 0)
   15383 			return rv;
   15384 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15385 		if (rv != 0)
   15386 			return rv;
   15387 
   15388 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15389 			goto out;	/* GOOD! */
   15390 
   15391 		/* Reset the PHY */
   15392 		wm_reset_phy(sc);
   15393 		delay(5*1000);
   15394 	}
   15395 
   15396 	/* Disable GigE link negotiation */
   15397 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15398 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15399 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15400 
   15401 	/*
   15402 	 * Call gig speed drop workaround on Gig disable before accessing
   15403 	 * any PHY registers.
   15404 	 */
   15405 	wm_gig_downshift_workaround_ich8lan(sc);
   15406 
   15407 out:
   15408 	return 0;
   15409 }
   15410 
   15411 /*
   15412  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15413  *  @sc: pointer to the HW structure
   15414  *
    15415  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15416  *  LPLU, Gig disable, MDIC PHY reset):
   15417  *    1) Set Kumeran Near-end loopback
   15418  *    2) Clear Kumeran Near-end loopback
   15419  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15420  */
   15421 static void
   15422 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15423 {
   15424 	uint16_t kmreg;
   15425 
   15426 	/* Only for igp3 */
   15427 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15428 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15429 			return;
   15430 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15431 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15432 			return;
   15433 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15434 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15435 	}
   15436 }
   15437 
   15438 /*
   15439  * Workaround for pch's PHYs
   15440  * XXX should be moved to new PHY driver?
   15441  */
   15442 static int
   15443 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15444 {
   15445 	device_t dev = sc->sc_dev;
   15446 	struct mii_data *mii = &sc->sc_mii;
   15447 	struct mii_softc *child;
   15448 	uint16_t phy_data, phyrev = 0;
   15449 	int phytype = sc->sc_phytype;
   15450 	int rv;
   15451 
   15452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15453 		device_xname(dev), __func__));
   15454 	KASSERT(sc->sc_type == WM_T_PCH);
   15455 
   15456 	/* Set MDIO slow mode before any other MDIO access */
   15457 	if (phytype == WMPHY_82577)
   15458 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15459 			return rv;
   15460 
   15461 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15462 	if (child != NULL)
   15463 		phyrev = child->mii_mpd_rev;
   15464 
    15465 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15466 	if ((child != NULL) &&
   15467 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15468 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15469 		/* Disable generation of early preamble (0x4431) */
   15470 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15471 		    &phy_data);
   15472 		if (rv != 0)
   15473 			return rv;
   15474 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15475 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15476 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15477 		    phy_data);
   15478 		if (rv != 0)
   15479 			return rv;
   15480 
   15481 		/* Preamble tuning for SSC */
   15482 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15483 		if (rv != 0)
   15484 			return rv;
   15485 	}
   15486 
   15487 	/* 82578 */
   15488 	if (phytype == WMPHY_82578) {
   15489 		/*
   15490 		 * Return registers to default by doing a soft reset then
   15491 		 * writing 0x3140 to the control register
   15492 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15493 		 */
   15494 		if ((child != NULL) && (phyrev < 2)) {
   15495 			PHY_RESET(child);
   15496 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15497 			    0x3140);
   15498 			if (rv != 0)
   15499 				return rv;
   15500 		}
   15501 	}
   15502 
   15503 	/* Select page 0 */
   15504 	if ((rv = sc->phy.acquire(sc)) != 0)
   15505 		return rv;
   15506 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15507 	sc->phy.release(sc);
   15508 	if (rv != 0)
   15509 		return rv;
   15510 
   15511 	/*
   15512 	 * Configure the K1 Si workaround during phy reset assuming there is
   15513 	 * link so that it disables K1 if link is in 1Gbps.
   15514 	 */
   15515 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15516 		return rv;
   15517 
   15518 	/* Workaround for link disconnects on a busy hub in half duplex */
   15519 	rv = sc->phy.acquire(sc);
   15520 	if (rv)
   15521 		return rv;
   15522 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15523 	if (rv)
   15524 		goto release;
   15525 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15526 	    phy_data & 0x00ff);
   15527 	if (rv)
   15528 		goto release;
   15529 
   15530 	/* Set MSE higher to enable link to stay up when noise is high */
   15531 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15532 release:
   15533 	sc->phy.release(sc);
   15534 
   15535 	return rv;
   15536 }
   15537 
   15538 /*
   15539  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15540  *  @sc:   pointer to the HW structure
   15541  */
   15542 static void
   15543 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15544 {
   15545 	device_t dev = sc->sc_dev;
   15546 	uint32_t mac_reg;
   15547 	uint16_t i, wuce;
   15548 	int count;
   15549 
   15550 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15551 		device_xname(sc->sc_dev), __func__));
   15552 
   15553 	if (sc->phy.acquire(sc) != 0)
   15554 		return;
   15555 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15556 		goto release;
   15557 
   15558 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15559 	count = wm_rar_count(sc);
   15560 	for (i = 0; i < count; i++) {
   15561 		uint16_t lo, hi;
   15562 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15563 		lo = (uint16_t)(mac_reg & 0xffff);
   15564 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15565 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15566 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15567 
   15568 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15569 		lo = (uint16_t)(mac_reg & 0xffff);
   15570 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15571 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15572 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15573 	}
   15574 
   15575 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15576 
   15577 release:
   15578 	sc->phy.release(sc);
   15579 }
   15580 
   15581 /*
   15582  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15583  *  done after every PHY reset.
   15584  */
   15585 static int
   15586 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15587 {
   15588 	device_t dev = sc->sc_dev;
   15589 	int rv;
   15590 
   15591 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15592 		device_xname(dev), __func__));
   15593 	KASSERT(sc->sc_type == WM_T_PCH2);
   15594 
   15595 	/* Set MDIO slow mode before any other MDIO access */
   15596 	rv = wm_set_mdio_slow_mode_hv(sc);
   15597 	if (rv != 0)
   15598 		return rv;
   15599 
   15600 	rv = sc->phy.acquire(sc);
   15601 	if (rv != 0)
   15602 		return rv;
   15603 	/* Set MSE higher to enable link to stay up when noise is high */
   15604 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15605 	if (rv != 0)
   15606 		goto release;
    15607 	/* Drop link after the MSE threshold was reached 5 times */
   15608 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15609 release:
   15610 	sc->phy.release(sc);
   15611 
   15612 	return rv;
   15613 }
   15614 
   15615 /**
   15616  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15617  *  @link: link up bool flag
   15618  *
   15619  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    15620  *  preventing further DMA write requests.  Work around the issue by disabling
    15621  *  the de-assertion of the clock request when in 1Gbps mode.
   15622  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15623  *  speeds in order to avoid Tx hangs.
   15624  **/
   15625 static int
   15626 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15627 {
   15628 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15629 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15630 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15631 	uint16_t phyreg;
   15632 
   15633 	if (link && (speed == STATUS_SPEED_1000)) {
   15634 		sc->phy.acquire(sc);
   15635 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15636 		    &phyreg);
   15637 		if (rv != 0)
   15638 			goto release;
   15639 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15640 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15641 		if (rv != 0)
   15642 			goto release;
   15643 		delay(20);
   15644 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15645 
   15646 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15647 		    &phyreg);
   15648 release:
   15649 		sc->phy.release(sc);
   15650 		return rv;
   15651 	}
   15652 
   15653 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15654 
   15655 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15656 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15657 	    || !link
   15658 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15659 		goto update_fextnvm6;
   15660 
   15661 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15662 
   15663 	/* Clear link status transmit timeout */
   15664 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15665 	if (speed == STATUS_SPEED_100) {
   15666 		/* Set inband Tx timeout to 5x10us for 100Half */
   15667 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15668 
   15669 		/* Do not extend the K1 entry latency for 100Half */
   15670 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15671 	} else {
   15672 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15673 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15674 
   15675 		/* Extend the K1 entry latency for 10 Mbps */
   15676 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15677 	}
   15678 
   15679 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15680 
   15681 update_fextnvm6:
   15682 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15683 	return 0;
   15684 }
   15685 
   15686 /*
   15687  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15688  *  @sc:   pointer to the HW structure
   15689  *  @link: link up bool flag
   15690  *
   15691  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15692  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15693  *  If link is down, the function will restore the default K1 setting located
   15694  *  in the NVM.
   15695  */
   15696 static int
   15697 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15698 {
   15699 	int k1_enable = sc->sc_nvm_k1_enabled;
   15700 
   15701 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15702 		device_xname(sc->sc_dev), __func__));
   15703 
   15704 	if (sc->phy.acquire(sc) != 0)
   15705 		return -1;
   15706 
   15707 	if (link) {
   15708 		k1_enable = 0;
   15709 
   15710 		/* Link stall fix for link up */
   15711 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15712 		    0x0100);
   15713 	} else {
   15714 		/* Link stall fix for link down */
   15715 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15716 		    0x4100);
   15717 	}
   15718 
   15719 	wm_configure_k1_ich8lan(sc, k1_enable);
   15720 	sc->phy.release(sc);
   15721 
   15722 	return 0;
   15723 }
   15724 
   15725 /*
   15726  *  wm_k1_workaround_lv - K1 Si workaround
   15727  *  @sc:   pointer to the HW structure
   15728  *
    15729  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    15730  *  Disable K1 for 1000 and 100 speeds.
   15731  */
   15732 static int
   15733 wm_k1_workaround_lv(struct wm_softc *sc)
   15734 {
   15735 	uint32_t reg;
   15736 	uint16_t phyreg;
   15737 	int rv;
   15738 
   15739 	if (sc->sc_type != WM_T_PCH2)
   15740 		return 0;
   15741 
   15742 	/* Set K1 beacon duration based on 10Mbps speed */
   15743 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15744 	if (rv != 0)
   15745 		return rv;
   15746 
   15747 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15748 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15749 		if (phyreg &
   15750 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15751 			/* LV 1G/100 packet drop issue workaround */
   15752 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15753 			    &phyreg);
   15754 			if (rv != 0)
   15755 				return rv;
   15756 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15757 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15758 			    phyreg);
   15759 			if (rv != 0)
   15760 				return rv;
   15761 		} else {
   15762 			/* For 10Mbps */
   15763 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15764 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15765 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15766 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15767 		}
   15768 	}
   15769 
   15770 	return 0;
   15771 }
   15772 
   15773 /*
   15774  *  wm_link_stall_workaround_hv - Si workaround
   15775  *  @sc: pointer to the HW structure
   15776  *
   15777  *  This function works around a Si bug where the link partner can get
   15778  *  a link up indication before the PHY does. If small packets are sent
   15779  *  by the link partner they can be placed in the packet buffer without
   15780  *  being properly accounted for by the PHY and will stall preventing
   15781  *  further packets from being received.  The workaround is to clear the
   15782  *  packet buffer after the PHY detects link up.
   15783  */
   15784 static int
   15785 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15786 {
   15787 	uint16_t phyreg;
   15788 
   15789 	if (sc->sc_phytype != WMPHY_82578)
   15790 		return 0;
   15791 
    15792 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15793 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15794 	if ((phyreg & BMCR_LOOP) != 0)
   15795 		return 0;
   15796 
   15797 	/* Check if link is up and at 1Gbps */
   15798 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15799 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15800 	    | BM_CS_STATUS_SPEED_MASK;
   15801 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15802 		| BM_CS_STATUS_SPEED_1000))
   15803 		return 0;
   15804 
   15805 	delay(200 * 1000);	/* XXX too big */
   15806 
   15807 	/* Flush the packets in the fifo buffer */
   15808 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15809 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15810 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15811 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15812 
   15813 	return 0;
   15814 }
   15815 
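          /*
           * Put the Kumeran interface into MDIO slow mode.  Callers do
           * this before any other MDIO access on PHYs (e.g. the 82577)
           * which need slow mode for reliable register access.
           */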
   15816 static int
   15817 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15818 {
   15819 	int rv;
   15820 	uint16_t reg;
   15821 
   15822 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15823 	if (rv != 0)
   15824 		return rv;
   15825 
   15826 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15827 	    reg | HV_KMRN_MDIO_SLOW);
   15828 }
   15829 
   15830 /*
   15831  *  wm_configure_k1_ich8lan - Configure K1 power state
   15832  *  @sc: pointer to the HW structure
   15833  *  @enable: K1 state to configure
   15834  *
   15835  *  Configure the K1 power state based on the provided parameter.
   15836  *  Assumes semaphore already acquired.
   15837  */
   15838 static void
   15839 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15840 {
   15841 	uint32_t ctrl, ctrl_ext, tmp;
   15842 	uint16_t kmreg;
   15843 	int rv;
   15844 
   15845 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15846 
   15847 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15848 	if (rv != 0)
   15849 		return;
   15850 
   15851 	if (k1_enable)
   15852 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15853 	else
   15854 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15855 
   15856 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15857 	if (rv != 0)
   15858 		return;
   15859 
   15860 	delay(20);
   15861 
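          	/*
          	 * Briefly force the MAC speed configuration (CTRL_FRCSPD
          	 * with speed bypass) and then restore the original
          	 * CTRL/CTRL_EXT values so that the new K1 setting takes
          	 * effect on the MAC/PHY interconnect.
          	 */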
   15862 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15863 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15864 
   15865 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15866 	tmp |= CTRL_FRCSPD;
   15867 
   15868 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15869 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15870 	CSR_WRITE_FLUSH(sc);
   15871 	delay(20);
   15872 
   15873 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15874 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15875 	CSR_WRITE_FLUSH(sc);
   15876 	delay(20);
   15877 
   15878 	return;
   15879 }
   15880 
    15881 /* Special case - for 82575 - need to do manual init ... */
   15882 static void
   15883 wm_reset_init_script_82575(struct wm_softc *sc)
   15884 {
    15885 	/*
    15886 	 * Remark: this is untested code - we have no board without EEPROM.
    15887 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    15888 	 */
   15889 
   15890 	/* SerDes configuration via SERDESCTRL */
   15891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15893 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15894 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15895 
   15896 	/* CCM configuration via CCMCTL register */
   15897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15899 
   15900 	/* PCIe lanes configuration */
   15901 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15902 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15903 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15904 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15905 
   15906 	/* PCIe PLL Configuration */
   15907 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15908 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15909 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15910 }
   15911 
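          /*
           * Restore the MDICNFG destination and shared-MDIO bits from the
           * NVM after a reset; an 82580 in SGMII mode may route its MDIO
           * accesses to an external PHY.
           */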
   15912 static void
   15913 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15914 {
   15915 	uint32_t reg;
   15916 	uint16_t nvmword;
   15917 	int rv;
   15918 
   15919 	if (sc->sc_type != WM_T_82580)
   15920 		return;
   15921 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15922 		return;
   15923 
   15924 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15925 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15926 	if (rv != 0) {
   15927 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15928 		    __func__);
   15929 		return;
   15930 	}
   15931 
   15932 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15933 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15934 		reg |= MDICNFG_DEST;
   15935 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15936 		reg |= MDICNFG_COM_MDIO;
   15937 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15938 }
   15939 
   15940 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15941 
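          /*
           * Check whether the PCH PHY answers on MDIO: an ID of all 0s or
           * all 1s means the access did not reach a real PHY.  If the
           * first attempt fails, retry in MDIO slow mode; on PCH_LPT and
           * newer, also unforce SMBus mode when ME is not active.
           */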
   15942 static bool
   15943 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15944 {
   15945 	uint32_t reg;
   15946 	uint16_t id1, id2;
   15947 	int i, rv;
   15948 
   15949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15950 		device_xname(sc->sc_dev), __func__));
   15951 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15952 
   15953 	id1 = id2 = 0xffff;
   15954 	for (i = 0; i < 2; i++) {
   15955 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15956 		    &id1);
   15957 		if ((rv != 0) || MII_INVALIDID(id1))
   15958 			continue;
   15959 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15960 		    &id2);
   15961 		if ((rv != 0) || MII_INVALIDID(id2))
   15962 			continue;
   15963 		break;
   15964 	}
   15965 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15966 		goto out;
   15967 
   15968 	/*
   15969 	 * In case the PHY needs to be in mdio slow mode,
   15970 	 * set slow mode and try to get the PHY id again.
   15971 	 */
   15972 	rv = 0;
   15973 	if (sc->sc_type < WM_T_PCH_LPT) {
   15974 		sc->phy.release(sc);
   15975 		wm_set_mdio_slow_mode_hv(sc);
   15976 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15977 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15978 		sc->phy.acquire(sc);
   15979 	}
   15980 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15981 		printf("XXX return with false\n");
   15982 		return false;
   15983 	}
   15984 out:
   15985 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15986 		/* Only unforce SMBus if ME is not active */
   15987 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15988 			uint16_t phyreg;
   15989 
   15990 			/* Unforce SMBus mode in PHY */
   15991 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15992 			    CV_SMB_CTRL, &phyreg);
   15993 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15994 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15995 			    CV_SMB_CTRL, phyreg);
   15996 
   15997 			/* Unforce SMBus mode in MAC */
   15998 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15999 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16000 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16001 		}
   16002 	}
   16003 	return true;
   16004 }
   16005 
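          /*
           * Toggle the LANPHYPC pin value to force a full PHY power
           * cycle.  Used when the PHY has become unreachable (e.g. left
           * in SMBus or ULP mode) and a plain register reset does not
           * help.
           */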
   16006 static void
   16007 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16008 {
   16009 	uint32_t reg;
   16010 	int i;
   16011 
   16012 	/* Set PHY Config Counter to 50msec */
   16013 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16014 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16015 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16016 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16017 
   16018 	/* Toggle LANPHYPC */
   16019 	reg = CSR_READ(sc, WMREG_CTRL);
   16020 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16021 	reg &= ~CTRL_LANPHYPC_VALUE;
   16022 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16023 	CSR_WRITE_FLUSH(sc);
   16024 	delay(1000);
   16025 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16026 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16027 	CSR_WRITE_FLUSH(sc);
   16028 
   16029 	if (sc->sc_type < WM_T_PCH_LPT)
   16030 		delay(50 * 1000);
   16031 	else {
   16032 		i = 20;
   16033 
   16034 		do {
   16035 			delay(5 * 1000);
   16036 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16037 		    && i--);
   16038 
   16039 		delay(30 * 1000);
   16040 	}
   16041 }
   16042 
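          /*
           * Program the PCH_LPT platform power management hints: report
           * LTR (Latency Tolerance Reporting) values derived from the Rx
           * buffer drain time and set the OBFF high water mark, so the
           * platform knows how aggressively it may enter low-power
           * states.
           */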
   16043 static int
   16044 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16045 {
   16046 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16047 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16048 	uint32_t rxa;
   16049 	uint16_t scale = 0, lat_enc = 0;
   16050 	int32_t obff_hwm = 0;
   16051 	int64_t lat_ns, value;
   16052 
   16053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16054 		device_xname(sc->sc_dev), __func__));
   16055 
   16056 	if (link) {
   16057 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16058 		uint32_t status;
   16059 		uint16_t speed;
   16060 		pcireg_t preg;
   16061 
   16062 		status = CSR_READ(sc, WMREG_STATUS);
   16063 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16064 		case STATUS_SPEED_10:
   16065 			speed = 10;
   16066 			break;
   16067 		case STATUS_SPEED_100:
   16068 			speed = 100;
   16069 			break;
   16070 		case STATUS_SPEED_1000:
   16071 			speed = 1000;
   16072 			break;
   16073 		default:
   16074 			device_printf(sc->sc_dev, "Unknown speed "
   16075 			    "(status = %08x)\n", status);
   16076 			return -1;
   16077 		}
   16078 
   16079 		/* Rx Packet Buffer Allocation size (KB) */
   16080 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16081 
   16082 		/*
   16083 		 * Determine the maximum latency tolerated by the device.
   16084 		 *
   16085 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16086 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16087 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16088 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16089 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16090 		 */
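          		/*
          		 * The latency budget is the time needed to fill the
          		 * Rx buffer, leaving room for two maximum-sized
          		 * frames: remaining bytes * 8 bits * 1000 divided by
          		 * the speed in Mb/s yields nanoseconds.
          		 */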
   16091 		lat_ns = ((int64_t)rxa * 1024 -
   16092 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16093 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16094 		if (lat_ns < 0)
   16095 			lat_ns = 0;
   16096 		else
   16097 			lat_ns /= speed;
   16098 		value = lat_ns;
   16099 
   16100 		while (value > LTRV_VALUE) {
    16101 			scale++;
   16102 			value = howmany(value, __BIT(5));
   16103 		}
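          		/*
          		 * Example: lat_ns = 200000 encodes as value 196 with
          		 * scale 2, i.e. 196 * 2^10 ns = 200704 ns; the encoded
          		 * latency rounds up to the next representable step.
          		 */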
   16104 		if (scale > LTRV_SCALE_MAX) {
   16105 			printf("%s: Invalid LTR latency scale %d\n",
   16106 			    device_xname(sc->sc_dev), scale);
   16107 			return -1;
   16108 		}
   16109 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16110 
   16111 		/* Determine the maximum latency tolerated by the platform */
   16112 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16113 		    WM_PCI_LTR_CAP_LPT);
   16114 		max_snoop = preg & 0xffff;
   16115 		max_nosnoop = preg >> 16;
   16116 
   16117 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16118 
   16119 		if (lat_enc > max_ltr_enc) {
   16120 			lat_enc = max_ltr_enc;
   16121 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16122 			    * PCI_LTR_SCALETONS(
   16123 				    __SHIFTOUT(lat_enc,
   16124 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16125 		}
   16126 
   16127 		if (lat_ns) {
   16128 			lat_ns *= speed * 1000;
   16129 			lat_ns /= 8;
   16130 			lat_ns /= 1000000000;
   16131 			obff_hwm = (int32_t)(rxa - lat_ns);
   16132 		}
   16133 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16134 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16135 			    "(rxa = %d, lat_ns = %d)\n",
   16136 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16137 			return -1;
   16138 		}
   16139 	}
    16140 	/* Snoop and No-Snoop latencies are the same */
   16141 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16142 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16143 
   16144 	/* Set OBFF high water mark */
   16145 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16146 	reg |= obff_hwm;
   16147 	CSR_WRITE(sc, WMREG_SVT, reg);
   16148 
   16149 	/* Enable OBFF */
   16150 	reg = CSR_READ(sc, WMREG_SVCR);
   16151 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16152 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16153 
   16154 	return 0;
   16155 }
   16156 
   16157 /*
   16158  * I210 Errata 25 and I211 Errata 10
   16159  * Slow System Clock.
   16160  */
   16161 static int
   16162 wm_pll_workaround_i210(struct wm_softc *sc)
   16163 {
   16164 	uint32_t mdicnfg, wuc;
   16165 	uint32_t reg;
   16166 	pcireg_t pcireg;
   16167 	uint32_t pmreg;
   16168 	uint16_t nvmword, tmp_nvmword;
   16169 	uint16_t phyval;
   16170 	bool wa_done = false;
   16171 	int i, rv = 0;
   16172 
   16173 	/* Get Power Management cap offset */
   16174 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16175 	    &pmreg, NULL) == 0)
   16176 		return -1;
   16177 
   16178 	/* Save WUC and MDICNFG registers */
   16179 	wuc = CSR_READ(sc, WMREG_WUC);
   16180 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16181 
   16182 	reg = mdicnfg & ~MDICNFG_DEST;
   16183 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16184 
   16185 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16186 		nvmword = INVM_DEFAULT_AL;
   16187 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16188 
   16189 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16190 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16191 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16192 
   16193 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16194 			rv = 0;
   16195 			break; /* OK */
   16196 		} else
   16197 			rv = -1;
   16198 
   16199 		wa_done = true;
   16200 		/* Directly reset the internal PHY */
   16201 		reg = CSR_READ(sc, WMREG_CTRL);
   16202 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16203 
   16204 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16205 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16206 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16207 
   16208 		CSR_WRITE(sc, WMREG_WUC, 0);
   16209 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16210 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16211 
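          		/*
          		 * Bounce the function through D3hot and back to D0 so
          		 * that the PHY re-runs its iNVM autoload with the
          		 * patched word and comes back with the PLL configured.
          		 */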
   16212 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16213 		    pmreg + PCI_PMCSR);
   16214 		pcireg |= PCI_PMCSR_STATE_D3;
   16215 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16216 		    pmreg + PCI_PMCSR, pcireg);
   16217 		delay(1000);
   16218 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16219 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16220 		    pmreg + PCI_PMCSR, pcireg);
   16221 
   16222 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16223 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16224 
   16225 		/* Restore WUC register */
   16226 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16227 	}
   16228 
   16229 	/* Restore MDICNFG setting */
   16230 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16231 	if (wa_done)
   16232 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16233 	return rv;
   16234 }
   16235 
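          /*
           * On PCH_SPT/PCH_CNP, ungate the side clock and disable IOSF
           * sideband clock gating and clock requests; this is apparently
           * needed for reliable interrupt delivery when legacy (INTx)
           * interrupts are used.
           */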
   16236 static void
   16237 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16238 {
   16239 	uint32_t reg;
   16240 
   16241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16242 		device_xname(sc->sc_dev), __func__));
   16243 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16244 	    || (sc->sc_type == WM_T_PCH_CNP));
   16245 
   16246 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16247 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16248 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16249 
   16250 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16251 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16252 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16253 }
   16254