      1 /*	$NetBSD: if_wm.c,v 1.642 2019/07/09 08:46:59 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.642 2019/07/09 08:46:59 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
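
/*
 * Illustrative sketch (not compiled): DPRINTF takes a WM_DEBUG_* bit and
 * a parenthesized printf argument list; the message is emitted only when
 * the corresponding bit is set in wm_debug.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif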
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * The maximum number of interrupts supported by this driver.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
     202  * situations with jumbo frames. If an mbuf chain has more than 64 DMA
     203  * segments, m_defrag() is called to reduce the number of segments.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
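
/*
 * Illustrative sketch (not compiled): both ring sizes are powers of two,
 * so WM_NEXTTX()/WM_NEXTTXS() advance an index with a mask rather than a
 * modulo.  A hypothetical producer filling 'nsegs' descriptors:
 */
#if 0
	int idx = txq->txq_next;
	for (int seg = 0; seg < nsegs; seg++) {
		/* ... fill txq->txq_descs[idx] from DMA segment 'seg' ... */
		idx = WM_NEXTTX(txq, idx);	/* wraps at WM_NTXDESC(txq) */
	}
	txq->txq_next = idx;
	txq->txq_free -= nsegs;
#endif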
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
     232  * Receive descriptor list size.  We have one Rx buffer for normal-
     233  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    234  * packet.  We allocate 256 receive descriptors, each with a 2k
    235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
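
/*
 * Worked example (assuming a 9018-byte jumbo frame): 9018 bytes spread
 * over MCLBYTES (2k) clusters rounds up to 5 Rx buffers per frame, and
 * 256 / 5 yields the ~50 jumbo packets mentioned above.
 */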
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
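
/*
 * Illustrative note: WM_CDTXOFF(txq, 10) is simply 10 * txq_descsize,
 * e.g. 10 * sizeof(wiseman_txdesc_t) for legacy descriptors.  Because
 * the descriptor size is carried in the queue rather than in a type,
 * the same offset macro serves all three descriptor layouts above.
 */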
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
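
/*
 * Illustrative sketch (not compiled): for queue 0 the attach macro below
 * formats the counter name "txq00txdw" into the buffer declared by
 * WM_Q_EVCNT_DEFINE(txq, txdw) before registering the event counter:
 */
#if 0
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, device_xname(sc->sc_dev));
	/* ... and on detach: */
	WM_Q_EVCNT_DETACH(txq, txdw, txq, 0);
#endif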
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
     335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     361 	 * CPUs. This queue intermediates between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
     366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
     398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
     417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
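
/*
 * Illustrative sketch (not compiled): the op tables let chip-specific
 * acquire/release pairs bracket a common access pattern, e.g. reading
 * one NVM word at a hypothetical offset 'off':
 */
#if 0
	uint16_t word;
	if (sc->nvm.acquire(sc) == 0) {
		(void)sc->nvm.read(sc, off, 1, &word);
		sc->nvm.release(sc);
	}
#endif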
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_ec_capenable;		/* last ec_capenable */
    518 	int sc_flowflags;		/* 802.3x flow control flags */
    519 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    520 	int sc_align_tweak;
    521 
    522 	void *sc_ihs[WM_MAX_NINTR];	/*
    523 					 * interrupt cookie.
    524 					 * - legacy and msi use sc_ihs[0] only
    525 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    526 					 */
    527 	pci_intr_handle_t *sc_intrs;	/*
    528 					 * legacy and msi use sc_intrs[0] only
     529 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    530 					 */
    531 	int sc_nintrs;			/* number of interrupts */
    532 
    533 	int sc_link_intr_idx;		/* index of MSI-X tables */
    534 
    535 	callout_t sc_tick_ch;		/* tick callout */
    536 	bool sc_core_stopping;
    537 
    538 	int sc_nvm_ver_major;
    539 	int sc_nvm_ver_minor;
    540 	int sc_nvm_ver_build;
    541 	int sc_nvm_addrbits;		/* NVM address bits */
    542 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    543 	int sc_ich8_flash_base;
    544 	int sc_ich8_flash_bank_size;
    545 	int sc_nvm_k1_enabled;
    546 
    547 	int sc_nqueues;
    548 	struct wm_queue *sc_queue;
    549 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    550 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    551 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    552 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    553 
    554 	int sc_affinity_offset;
    555 
    556 #ifdef WM_EVENT_COUNTERS
    557 	/* Event counters. */
    558 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    559 
    560 	/* WM_T_82542_2_1 only */
    561 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    564 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    565 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    566 #endif /* WM_EVENT_COUNTERS */
    567 
     568 	/* This variable is used only on the 82547. */
    569 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    570 
    571 	uint32_t sc_ctrl;		/* prototype CTRL register */
    572 #if 0
    573 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    574 #endif
    575 	uint32_t sc_icr;		/* prototype interrupt bits */
    576 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    577 	uint32_t sc_tctl;		/* prototype TCTL register */
    578 	uint32_t sc_rctl;		/* prototype RCTL register */
    579 	uint32_t sc_txcw;		/* prototype TXCW register */
    580 	uint32_t sc_tipg;		/* prototype TIPG register */
    581 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    582 	uint32_t sc_pba;		/* prototype PBA register */
    583 
    584 	int sc_tbi_linkup;		/* TBI link status */
    585 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    586 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    587 
    588 	int sc_mchash_type;		/* multicast filter offset */
    589 
    590 	krndsource_t rnd_source;	/* random source */
    591 
    592 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    593 
    594 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    595 	kmutex_t *sc_ich_phymtx;	/*
    596 					 * 82574/82583/ICH/PCH specific PHY
    597 					 * mutex. For 82574/82583, the mutex
    598 					 * is used for both PHY and NVM.
    599 					 */
    600 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    601 
    602 	struct wm_phyop phy;
    603 	struct wm_nvmop nvm;
    604 };
    605 
    606 #define WM_CORE_LOCK(_sc)						\
    607 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    608 #define WM_CORE_UNLOCK(_sc)						\
    609 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    610 #define WM_CORE_LOCKED(_sc)						\
    611 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
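
/*
 * Illustrative sketch (not compiled): the macros tolerate a NULL
 * sc_core_lock (they become no-ops), so callers can bracket softc
 * updates unconditionally:
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	sc->sc_if_flags = ifp->if_flags;	/* protected softc update */
	WM_CORE_UNLOCK(sc);
#endif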
    612 
    613 #define	WM_RXCHAIN_RESET(rxq)						\
    614 do {									\
    615 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    616 	*(rxq)->rxq_tailp = NULL;					\
    617 	(rxq)->rxq_len = 0;						\
    618 } while (/*CONSTCOND*/0)
    619 
    620 #define	WM_RXCHAIN_LINK(rxq, m)						\
    621 do {									\
    622 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    623 	(rxq)->rxq_tailp = &(m)->m_next;				\
    624 } while (/*CONSTCOND*/0)
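
/*
 * Illustrative sketch (not compiled): rxq_tailp always points at the
 * m_next slot to fill next, so appending each fragment of a multi-buffer
 * jumbo frame is O(1) and never walks the chain (m0/m1 hypothetical):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* empty chain, rxq_len = 0 */
	WM_RXCHAIN_LINK(rxq, m0);	/* head fragment */
	WM_RXCHAIN_LINK(rxq, m1);	/* appended via rxq_tailp */
#endif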
    625 
    626 #ifdef WM_EVENT_COUNTERS
    627 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    628 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    629 
    630 #define WM_Q_EVCNT_INCR(qname, evname)			\
    631 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    632 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    633 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    634 #else /* !WM_EVENT_COUNTERS */
    635 #define	WM_EVCNT_INCR(ev)	/* nothing */
    636 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    637 
    638 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    639 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    640 #endif /* !WM_EVENT_COUNTERS */
    641 
    642 #define	CSR_READ(sc, reg)						\
    643 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    644 #define	CSR_WRITE(sc, reg, val)						\
    645 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    646 #define	CSR_WRITE_FLUSH(sc)						\
    647 	(void)CSR_READ((sc), WMREG_STATUS)
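
/*
 * Illustrative sketch (not compiled): a typical read-modify-write of a
 * device register, drained with CSR_WRITE_FLUSH() so the posted write
 * reaches the chip before the caller proceeds (CTRL_SLU = set link up):
 */
#if 0
	uint32_t reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
#endif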
    648 
    649 #define ICH8_FLASH_READ32(sc, reg)					\
    650 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    651 	    (reg) + sc->sc_flashreg_offset)
    652 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    653 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    654 	    (reg) + sc->sc_flashreg_offset, (data))
    655 
    656 #define ICH8_FLASH_READ16(sc, reg)					\
    657 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    658 	    (reg) + sc->sc_flashreg_offset)
    659 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    660 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    661 	    (reg) + sc->sc_flashreg_offset, (data))
    662 
    663 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    664 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    665 
    666 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    667 #define	WM_CDTXADDR_HI(txq, x)						\
    668 	(sizeof(bus_addr_t) == 8 ?					\
    669 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    670 
    671 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    672 #define	WM_CDRXADDR_HI(rxq, x)						\
    673 	(sizeof(bus_addr_t) == 8 ?					\
    674 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
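
/*
 * Illustrative sketch (not compiled): the _LO/_HI halves feed the
 * 32-bit base-address register pairs; on 32-bit bus_addr_t systems the
 * _HI half is constant 0 (TDBAL/TDBAH names assumed from if_wmreg.h):
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
#endif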
    675 
    676 /*
     677  * Register read/write functions,
     678  * other than CSR_{READ|WRITE}().
    679  */
    680 #if 0
    681 static inline uint32_t wm_io_read(struct wm_softc *, int);
    682 #endif
    683 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    684 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    685     uint32_t, uint32_t);
    686 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    687 
    688 /*
    689  * Descriptor sync/init functions.
    690  */
    691 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    692 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    693 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    694 
    695 /*
    696  * Device driver interface functions and commonly used functions.
    697  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    698  */
    699 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    700 static int	wm_match(device_t, cfdata_t, void *);
    701 static void	wm_attach(device_t, device_t, void *);
    702 static int	wm_detach(device_t, int);
    703 static bool	wm_suspend(device_t, const pmf_qual_t *);
    704 static bool	wm_resume(device_t, const pmf_qual_t *);
    705 static void	wm_watchdog(struct ifnet *);
    706 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    707     uint16_t *);
    708 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    709     uint16_t *);
    710 static void	wm_tick(void *);
    711 static int	wm_ifflags_cb(struct ethercom *);
    712 static int	wm_ioctl(struct ifnet *, u_long, void *);
    713 /* MAC address related */
    714 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    715 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    716 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    717 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    718 static int	wm_rar_count(struct wm_softc *);
    719 static void	wm_set_filter(struct wm_softc *);
    720 /* Reset and init related */
    721 static void	wm_set_vlan(struct wm_softc *);
    722 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    723 static void	wm_get_auto_rd_done(struct wm_softc *);
    724 static void	wm_lan_init_done(struct wm_softc *);
    725 static void	wm_get_cfg_done(struct wm_softc *);
    726 static int	wm_phy_post_reset(struct wm_softc *);
    727 static int	wm_write_smbus_addr(struct wm_softc *);
    728 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    729 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    730 static void	wm_initialize_hardware_bits(struct wm_softc *);
    731 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    732 static int	wm_reset_phy(struct wm_softc *);
    733 static void	wm_flush_desc_rings(struct wm_softc *);
    734 static void	wm_reset(struct wm_softc *);
    735 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    736 static void	wm_rxdrain(struct wm_rxqueue *);
    737 static void	wm_init_rss(struct wm_softc *);
    738 static void	wm_adjust_qnum(struct wm_softc *, int);
    739 static inline bool	wm_is_using_msix(struct wm_softc *);
    740 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    741 static int	wm_softint_establish(struct wm_softc *, int, int);
    742 static int	wm_setup_legacy(struct wm_softc *);
    743 static int	wm_setup_msix(struct wm_softc *);
    744 static int	wm_init(struct ifnet *);
    745 static int	wm_init_locked(struct ifnet *);
    746 static void	wm_unset_stopping_flags(struct wm_softc *);
    747 static void	wm_set_stopping_flags(struct wm_softc *);
    748 static void	wm_stop(struct ifnet *, int);
    749 static void	wm_stop_locked(struct ifnet *, int);
    750 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    751 static void	wm_82547_txfifo_stall(void *);
    752 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    753 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    754 /* DMA related */
    755 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    756 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    757 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    758 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    759     struct wm_txqueue *);
    760 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    761 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    762 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    763     struct wm_rxqueue *);
    764 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    765 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    766 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    767 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    768 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    769 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    770 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    771     struct wm_txqueue *);
    772 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    773     struct wm_rxqueue *);
    774 static int	wm_alloc_txrx_queues(struct wm_softc *);
    775 static void	wm_free_txrx_queues(struct wm_softc *);
    776 static int	wm_init_txrx_queues(struct wm_softc *);
    777 /* Start */
    778 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    779     struct wm_txsoft *, uint32_t *, uint8_t *);
    780 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    781 static void	wm_start(struct ifnet *);
    782 static void	wm_start_locked(struct ifnet *);
    783 static int	wm_transmit(struct ifnet *, struct mbuf *);
    784 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    785 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    786     bool);
    787 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    788     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    789 static void	wm_nq_start(struct ifnet *);
    790 static void	wm_nq_start_locked(struct ifnet *);
    791 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    792 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    793 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    794     bool);
    795 static void	wm_deferred_start_locked(struct wm_txqueue *);
    796 static void	wm_handle_queue(void *);
    797 /* Interrupt */
    798 static bool	wm_txeof(struct wm_txqueue *, u_int);
    799 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    800 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    801 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    802 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    803 static void	wm_linkintr(struct wm_softc *, uint32_t);
    804 static int	wm_intr_legacy(void *);
    805 static inline void	wm_txrxintr_disable(struct wm_queue *);
    806 static inline void	wm_txrxintr_enable(struct wm_queue *);
    807 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    808 static int	wm_txrxintr_msix(void *);
    809 static int	wm_linkintr_msix(void *);
    810 
    811 /*
    812  * Media related.
    813  * GMII, SGMII, TBI, SERDES and SFP.
    814  */
    815 /* Common */
    816 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    817 /* GMII related */
    818 static void	wm_gmii_reset(struct wm_softc *);
    819 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    820 static int	wm_get_phy_id_82575(struct wm_softc *);
    821 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    822 static int	wm_gmii_mediachange(struct ifnet *);
    823 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    825 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    826 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    827 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    828 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    829 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    830 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    831 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    832 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    833 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    834 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    835 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    836 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    837 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    838 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    839 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    840 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    841 	bool);
    842 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    843 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    844 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    845 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    846 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    847 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    848 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    849 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    850 static void	wm_gmii_statchg(struct ifnet *);
    851 /*
     852  * Kumeran related (80003, ICH* and PCH*).
     853  * These functions are not for accessing MII registers but for accessing
     854  * Kumeran-specific registers.
    855  */
    856 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    857 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    858 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    859 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    860 /* EMI register related */
    861 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    862 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    863 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    864 /* SGMII */
    865 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    866 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    867 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    868 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    869 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    870 /* TBI related */
    871 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    872 static void	wm_tbi_mediainit(struct wm_softc *);
    873 static int	wm_tbi_mediachange(struct ifnet *);
    874 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    875 static int	wm_check_for_link(struct wm_softc *);
    876 static void	wm_tbi_tick(struct wm_softc *);
    877 /* SERDES related */
    878 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    879 static int	wm_serdes_mediachange(struct ifnet *);
    880 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    881 static void	wm_serdes_tick(struct wm_softc *);
    882 /* SFP related */
    883 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    884 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    885 
    886 /*
    887  * NVM related.
    888  * Microwire, SPI (w/wo EERD) and Flash.
    889  */
    890 /* Misc functions */
    891 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    892 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    893 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    894 /* Microwire */
    895 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    896 /* SPI */
    897 static int	wm_nvm_ready_spi(struct wm_softc *);
    898 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     899 /* For use with EERD */
    900 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    901 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    902 /* Flash */
    903 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    904     unsigned int *);
    905 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    906 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    907 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    908     uint32_t *);
    909 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    910 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    911 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    912 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    913 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    914 /* iNVM */
    915 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    916 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    917 /* Lock, detecting NVM type, validate checksum and read */
    918 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    919 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    920 static int	wm_nvm_validate_checksum(struct wm_softc *);
    921 static void	wm_nvm_version_invm(struct wm_softc *);
    922 static void	wm_nvm_version(struct wm_softc *);
    923 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    924 
    925 /*
    926  * Hardware semaphores.
     927  * Very complex...
    928  */
    929 static int	wm_get_null(struct wm_softc *);
    930 static void	wm_put_null(struct wm_softc *);
    931 static int	wm_get_eecd(struct wm_softc *);
    932 static void	wm_put_eecd(struct wm_softc *);
    933 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    934 static void	wm_put_swsm_semaphore(struct wm_softc *);
    935 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    936 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    937 static int	wm_get_nvm_80003(struct wm_softc *);
    938 static void	wm_put_nvm_80003(struct wm_softc *);
    939 static int	wm_get_nvm_82571(struct wm_softc *);
    940 static void	wm_put_nvm_82571(struct wm_softc *);
    941 static int	wm_get_phy_82575(struct wm_softc *);
    942 static void	wm_put_phy_82575(struct wm_softc *);
    943 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    944 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    945 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    946 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    947 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    948 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    949 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    950 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    951 
    952 /*
    953  * Management mode and power management related subroutines.
    954  * BMC, AMT, suspend/resume and EEE.
    955  */
    956 #if 0
    957 static int	wm_check_mng_mode(struct wm_softc *);
    958 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    959 static int	wm_check_mng_mode_82574(struct wm_softc *);
    960 static int	wm_check_mng_mode_generic(struct wm_softc *);
    961 #endif
    962 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    963 static bool	wm_phy_resetisblocked(struct wm_softc *);
    964 static void	wm_get_hw_control(struct wm_softc *);
    965 static void	wm_release_hw_control(struct wm_softc *);
    966 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    967 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    968 static void	wm_init_manageability(struct wm_softc *);
    969 static void	wm_release_manageability(struct wm_softc *);
    970 static void	wm_get_wakeup(struct wm_softc *);
    971 static int	wm_ulp_disable(struct wm_softc *);
    972 static int	wm_enable_phy_wakeup(struct wm_softc *);
    973 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    974 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    975 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    976 static void	wm_enable_wakeup(struct wm_softc *);
    977 static void	wm_disable_aspm(struct wm_softc *);
    978 /* LPLU (Low Power Link Up) */
    979 static void	wm_lplu_d0_disable(struct wm_softc *);
    980 /* EEE */
    981 static int	wm_set_eee_i350(struct wm_softc *);
    982 static int	wm_set_eee_pchlan(struct wm_softc *);
    983 static int	wm_set_eee(struct wm_softc *);
    984 
    985 /*
    986  * Workarounds (mainly PHY related).
    987  * Basically, PHY's workarounds are in the PHY drivers.
     988  * Basically, PHY workarounds are implemented in the PHY drivers.
    989 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    990 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    991 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    992 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    993 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    994 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    995 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    996 static int	wm_k1_workaround_lv(struct wm_softc *);
    997 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    998 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    999 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1000 static void	wm_reset_init_script_82575(struct wm_softc *);
   1001 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1002 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1003 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1004 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1005 static int	wm_pll_workaround_i210(struct wm_softc *);
   1006 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1007 
   1008 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1009     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1010 
   1011 /*
   1012  * Devices supported by this driver.
   1013  */
   1014 static const struct wm_product {
   1015 	pci_vendor_id_t		wmp_vendor;
   1016 	pci_product_id_t	wmp_product;
   1017 	const char		*wmp_name;
   1018 	wm_chip_type		wmp_type;
   1019 	uint32_t		wmp_flags;
   1020 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1021 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1022 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1023 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1024 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1025 } wm_products[] = {
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1027 	  "Intel i82542 1000BASE-X Ethernet",
   1028 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1031 	  "Intel i82543GC 1000BASE-X Ethernet",
   1032 	  WM_T_82543,		WMP_F_FIBER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1035 	  "Intel i82543GC 1000BASE-T Ethernet",
   1036 	  WM_T_82543,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1039 	  "Intel i82544EI 1000BASE-T Ethernet",
   1040 	  WM_T_82544,		WMP_F_COPPER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1043 	  "Intel i82544EI 1000BASE-X Ethernet",
   1044 	  WM_T_82544,		WMP_F_FIBER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1047 	  "Intel i82544GC 1000BASE-T Ethernet",
   1048 	  WM_T_82544,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1051 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1052 	  WM_T_82544,		WMP_F_COPPER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1055 	  "Intel i82540EM 1000BASE-T Ethernet",
   1056 	  WM_T_82540,		WMP_F_COPPER },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1059 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1060 	  WM_T_82540,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1063 	  "Intel i82540EP 1000BASE-T Ethernet",
   1064 	  WM_T_82540,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1067 	  "Intel i82540EP 1000BASE-T Ethernet",
   1068 	  WM_T_82540,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1071 	  "Intel i82540EP 1000BASE-T Ethernet",
   1072 	  WM_T_82540,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1075 	  "Intel i82545EM 1000BASE-T Ethernet",
   1076 	  WM_T_82545,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1079 	  "Intel i82545GM 1000BASE-T Ethernet",
   1080 	  WM_T_82545_3,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1083 	  "Intel i82545GM 1000BASE-X Ethernet",
   1084 	  WM_T_82545_3,		WMP_F_FIBER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1087 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1088 	  WM_T_82545_3,		WMP_F_SERDES },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1091 	  "Intel i82546EB 1000BASE-T Ethernet",
   1092 	  WM_T_82546,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1095 	  "Intel i82546EB 1000BASE-T Ethernet",
   1096 	  WM_T_82546,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1099 	  "Intel i82545EM 1000BASE-X Ethernet",
   1100 	  WM_T_82545,		WMP_F_FIBER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1103 	  "Intel i82546EB 1000BASE-X Ethernet",
   1104 	  WM_T_82546,		WMP_F_FIBER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1107 	  "Intel i82546GB 1000BASE-T Ethernet",
   1108 	  WM_T_82546_3,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1111 	  "Intel i82546GB 1000BASE-X Ethernet",
   1112 	  WM_T_82546_3,		WMP_F_FIBER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1115 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1116 	  WM_T_82546_3,		WMP_F_SERDES },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1119 	  "i82546GB quad-port Gigabit Ethernet",
   1120 	  WM_T_82546_3,		WMP_F_COPPER },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1123 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1124 	  WM_T_82546_3,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1127 	  "Intel PRO/1000MT (82546GB)",
   1128 	  WM_T_82546_3,		WMP_F_COPPER },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1131 	  "Intel i82541EI 1000BASE-T Ethernet",
   1132 	  WM_T_82541,		WMP_F_COPPER },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1135 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1136 	  WM_T_82541,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1139 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1140 	  WM_T_82541,		WMP_F_COPPER },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1143 	  "Intel i82541ER 1000BASE-T Ethernet",
   1144 	  WM_T_82541_2,		WMP_F_COPPER },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1147 	  "Intel i82541GI 1000BASE-T Ethernet",
   1148 	  WM_T_82541_2,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1151 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1152 	  WM_T_82541_2,		WMP_F_COPPER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1155 	  "Intel i82541PI 1000BASE-T Ethernet",
   1156 	  WM_T_82541_2,		WMP_F_COPPER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1159 	  "Intel i82547EI 1000BASE-T Ethernet",
   1160 	  WM_T_82547,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1163 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1164 	  WM_T_82547,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1167 	  "Intel i82547GI 1000BASE-T Ethernet",
   1168 	  WM_T_82547_2,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1171 	  "Intel PRO/1000 PT (82571EB)",
   1172 	  WM_T_82571,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1175 	  "Intel PRO/1000 PF (82571EB)",
   1176 	  WM_T_82571,		WMP_F_FIBER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1179 	  "Intel PRO/1000 PB (82571EB)",
   1180 	  WM_T_82571,		WMP_F_SERDES },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1183 	  "Intel PRO/1000 QT (82571EB)",
   1184 	  WM_T_82571,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1187 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1188 	  WM_T_82571,		WMP_F_COPPER, },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1191 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1192 	  WM_T_82571,		WMP_F_COPPER, },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1195 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1196 	  WM_T_82571,		WMP_F_SERDES, },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1199 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1200 	  WM_T_82571,		WMP_F_SERDES, },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1203 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1204 	  WM_T_82571,		WMP_F_FIBER, },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1207 	  "Intel i82572EI 1000baseT Ethernet",
   1208 	  WM_T_82572,		WMP_F_COPPER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1211 	  "Intel i82572EI 1000baseX Ethernet",
   1212 	  WM_T_82572,		WMP_F_FIBER },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1215 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1216 	  WM_T_82572,		WMP_F_SERDES },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1219 	  "Intel i82572EI 1000baseT Ethernet",
   1220 	  WM_T_82572,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1223 	  "Intel i82573E",
   1224 	  WM_T_82573,		WMP_F_COPPER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1227 	  "Intel i82573E IAMT",
   1228 	  WM_T_82573,		WMP_F_COPPER },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1231 	  "Intel i82573L Gigabit Ethernet",
   1232 	  WM_T_82573,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1235 	  "Intel i82574L",
   1236 	  WM_T_82574,		WMP_F_COPPER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1239 	  "Intel i82574L",
   1240 	  WM_T_82574,		WMP_F_COPPER },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1243 	  "Intel i82583V",
   1244 	  WM_T_82583,		WMP_F_COPPER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    1247 	  "Intel i80003 dual 1000baseT Ethernet",
   1248 	  WM_T_80003,		WMP_F_COPPER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
    1251 	  "Intel i80003 dual 1000baseX Ethernet",
   1252 	  WM_T_80003,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1255 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1256 	  WM_T_80003,		WMP_F_SERDES },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1259 	  "Intel i80003 1000baseT Ethernet",
   1260 	  WM_T_80003,		WMP_F_COPPER },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1263 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1264 	  WM_T_80003,		WMP_F_SERDES },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1267 	  "Intel i82801H (M_AMT) LAN Controller",
   1268 	  WM_T_ICH8,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1270 	  "Intel i82801H (AMT) LAN Controller",
   1271 	  WM_T_ICH8,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1273 	  "Intel i82801H LAN Controller",
   1274 	  WM_T_ICH8,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1276 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1277 	  WM_T_ICH8,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1279 	  "Intel i82801H (M) LAN Controller",
   1280 	  WM_T_ICH8,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1282 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1283 	  WM_T_ICH8,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1285 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1286 	  WM_T_ICH8,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1288 	  "82567V-3 LAN Controller",
   1289 	  WM_T_ICH8,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1291 	  "82801I (AMT) LAN Controller",
   1292 	  WM_T_ICH9,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1294 	  "82801I 10/100 LAN Controller",
   1295 	  WM_T_ICH9,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1297 	  "82801I (G) 10/100 LAN Controller",
   1298 	  WM_T_ICH9,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1300 	  "82801I (GT) 10/100 LAN Controller",
   1301 	  WM_T_ICH9,		WMP_F_COPPER },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1303 	  "82801I (C) LAN Controller",
   1304 	  WM_T_ICH9,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1306 	  "82801I mobile LAN Controller",
   1307 	  WM_T_ICH9,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1309 	  "82801I mobile (V) LAN Controller",
   1310 	  WM_T_ICH9,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1312 	  "82801I mobile (AMT) LAN Controller",
   1313 	  WM_T_ICH9,		WMP_F_COPPER },
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1315 	  "82567LM-4 LAN Controller",
   1316 	  WM_T_ICH9,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1318 	  "82567LM-2 LAN Controller",
   1319 	  WM_T_ICH10,		WMP_F_COPPER },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1321 	  "82567LF-2 LAN Controller",
   1322 	  WM_T_ICH10,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1324 	  "82567LM-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1327 	  "82567LF-3 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1330 	  "82567V-2 LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1333 	  "82567V-3? LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1336 	  "HANKSVILLE LAN Controller",
   1337 	  WM_T_ICH10,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1339 	  "PCH LAN (82577LM) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1342 	  "PCH LAN (82577LC) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1345 	  "PCH LAN (82578DM) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1348 	  "PCH LAN (82578DC) Controller",
   1349 	  WM_T_PCH,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1351 	  "PCH2 LAN (82579LM) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1354 	  "PCH2 LAN (82579V) Controller",
   1355 	  WM_T_PCH2,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1357 	  "82575EB dual-1000baseT Ethernet",
   1358 	  WM_T_82575,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1360 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1361 	  WM_T_82575,		WMP_F_SERDES },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1363 	  "82575GB quad-1000baseT Ethernet",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1366 	  "82575GB quad-1000baseT Ethernet (PM)",
   1367 	  WM_T_82575,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1369 	  "82576 1000BaseT Ethernet",
   1370 	  WM_T_82576,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1372 	  "82576 1000BaseX Ethernet",
   1373 	  WM_T_82576,		WMP_F_FIBER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1376 	  "82576 gigabit Ethernet (SERDES)",
   1377 	  WM_T_82576,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1380 	  "82576 quad-1000BaseT Ethernet",
   1381 	  WM_T_82576,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1384 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1385 	  WM_T_82576,		WMP_F_COPPER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1388 	  "82576 gigabit Ethernet",
   1389 	  WM_T_82576,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1392 	  "82576 gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1395 	  "82576 quad-gigabit Ethernet (SERDES)",
   1396 	  WM_T_82576,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1399 	  "82580 1000BaseT Ethernet",
   1400 	  WM_T_82580,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1402 	  "82580 1000BaseX Ethernet",
   1403 	  WM_T_82580,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1406 	  "82580 1000BaseT Ethernet (SERDES)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1410 	  "82580 gigabit Ethernet (SGMII)",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1413 	  "82580 dual-1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1417 	  "82580 quad-1000BaseX Ethernet",
   1418 	  WM_T_82580,		WMP_F_FIBER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1421 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1422 	  WM_T_82580,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1425 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1426 	  WM_T_82580,		WMP_F_SERDES },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1429 	  "DH89XXCC 1000BASE-KX Ethernet",
   1430 	  WM_T_82580,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1433 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1434 	  WM_T_82580,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1437 	  "I350 Gigabit Network Connection",
   1438 	  WM_T_I350,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1441 	  "I350 Gigabit Fiber Network Connection",
   1442 	  WM_T_I350,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1445 	  "I350 Gigabit Backplane Connection",
   1446 	  WM_T_I350,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1449 	  "I350 Quad Port Gigabit Ethernet",
   1450 	  WM_T_I350,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1453 	  "I350 Gigabit Connection",
   1454 	  WM_T_I350,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1457 	  "I354 Gigabit Ethernet (KX)",
   1458 	  WM_T_I354,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1461 	  "I354 Gigabit Ethernet (SGMII)",
   1462 	  WM_T_I354,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1465 	  "I354 Gigabit Ethernet (2.5G)",
   1466 	  WM_T_I354,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1469 	  "I210-T1 Ethernet Server Adapter",
   1470 	  WM_T_I210,		WMP_F_COPPER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1473 	  "I210 Ethernet (Copper OEM)",
   1474 	  WM_T_I210,		WMP_F_COPPER },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1477 	  "I210 Ethernet (Copper IT)",
   1478 	  WM_T_I210,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1481 	  "I210 Ethernet (Copper, FLASH less)",
   1482 	  WM_T_I210,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1485 	  "I210 Gigabit Ethernet (Fiber)",
   1486 	  WM_T_I210,		WMP_F_FIBER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1489 	  "I210 Gigabit Ethernet (SERDES)",
   1490 	  WM_T_I210,		WMP_F_SERDES },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1493 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1494 	  WM_T_I210,		WMP_F_SERDES },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1497 	  "I210 Gigabit Ethernet (SGMII)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1501 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1505 	  "I211 Ethernet (COPPER)",
   1506 	  WM_T_I211,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1508 	  "I217 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1511 	  "I217 LM Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1520 	  "I218 V Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1529 	  "I218 LM Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1532 	  "I219 LM Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1535 	  "I219 LM Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1562 	  "I219 V Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1565 	  "I219 V Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1568 	  "I219 V Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1571 	  "I219 V Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1574 	  "I219 V Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1577 	  "I219 V Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1580 	  "I219 V Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ 0,			0,
   1583 	  NULL,
   1584 	  0,			0 },
   1585 };
   1586 
   1587 /*
   1588  * Register read/write functions.
   1589  * Other than CSR_{READ|WRITE}().
   1590  */
   1591 
   1592 #if 0 /* Not currently used */
   1593 static inline uint32_t
   1594 wm_io_read(struct wm_softc *sc, int reg)
   1595 {
   1596 
   1597 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1598 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1599 }
   1600 #endif
   1601 
   1602 static inline void
   1603 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1604 {
   1605 
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1607 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1608 }
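
/*
 * Both helpers above go through the chip's indirect I/O window: a write
 * to BAR offset 0 selects the register offset, and the data itself is
 * then transferred at BAR offset 4.
 */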
   1609 
   1610 static inline void
   1611 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1612     uint32_t data)
   1613 {
   1614 	uint32_t regval;
   1615 	int i;
   1616 
   1617 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1618 
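	/*
	 * The 8-bit payload sits in the low data field and the target
	 * offset is shifted into the address field; the hardware sets
	 * SCTL_CTL_READY (polled below) once it has taken the write.
	 */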
   1619 	CSR_WRITE(sc, reg, regval);
   1620 
   1621 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1622 		delay(5);
   1623 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1624 			break;
   1625 	}
   1626 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1627 		aprint_error("%s: WARNING:"
   1628 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1629 		    device_xname(sc->sc_dev), reg);
   1630 	}
   1631 }
   1632 
   1633 static inline void
   1634 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1635 {
   1636 	wa->wa_low = htole32(v & 0xffffffffU);
   1637 	if (sizeof(bus_addr_t) == 8)
   1638 		wa->wa_high = htole32((uint64_t) v >> 32);
   1639 	else
   1640 		wa->wa_high = 0;
   1641 }
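
/*
 * Illustration: with a 64-bit bus_addr_t, v = 0x123456780ULL is stored
 * as wa_low = htole32(0x23456780) and wa_high = htole32(0x1); with a
 * 32-bit bus_addr_t the high word is always zero.
 */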
   1642 
   1643 /*
   1644  * Descriptor sync/init functions.
   1645  */
   1646 static inline void
   1647 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1648 {
   1649 	struct wm_softc *sc = txq->txq_sc;
   1650 
   1651 	/* If it will wrap around, sync to the end of the ring. */
   1652 	if ((start + num) > WM_NTXDESC(txq)) {
   1653 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1654 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1655 		    (WM_NTXDESC(txq) - start), ops);
   1656 		num -= (WM_NTXDESC(txq) - start);
   1657 		start = 0;
   1658 	}
   1659 
   1660 	/* Now sync whatever is left. */
   1661 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1662 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1663 }
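
/*
 * Wrap example: on a 256-descriptor ring, a call with start = 250 and
 * num = 10 first syncs descriptors 250..255 and then descriptors 0..3.
 */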
   1664 
   1665 static inline void
   1666 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1667 {
   1668 	struct wm_softc *sc = rxq->rxq_sc;
   1669 
   1670 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1671 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1672 }
   1673 
   1674 static inline void
   1675 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1676 {
   1677 	struct wm_softc *sc = rxq->rxq_sc;
   1678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1679 	struct mbuf *m = rxs->rxs_mbuf;
   1680 
   1681 	/*
   1682 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1683 	 * so that the payload after the Ethernet header is aligned
   1684 	 * to a 4-byte boundary.
    1685 	 *
   1686 	 * XXX BRAINDAMAGE ALERT!
   1687 	 * The stupid chip uses the same size for every buffer, which
   1688 	 * is set in the Receive Control register.  We are using the 2K
   1689 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1690 	 * reason, we can't "scoot" packets longer than the standard
   1691 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1692 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1693 	 * the upper layer copy the headers.
   1694 	 */
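	/*
	 * The alignment arithmetic: an Ethernet header is 14 bytes, so
	 * with the 2-byte tweak the payload begins at buffer offset
	 * 14 + 2 = 16, which is 4-byte aligned.
	 */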
   1695 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1696 
   1697 	if (sc->sc_type == WM_T_82574) {
   1698 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1699 		rxd->erx_data.erxd_addr =
   1700 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1701 		rxd->erx_data.erxd_dd = 0;
   1702 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1703 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1704 
   1705 		rxd->nqrx_data.nrxd_paddr =
   1706 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1707 		/* Currently, split header is not supported. */
   1708 		rxd->nqrx_data.nrxd_haddr = 0;
   1709 	} else {
   1710 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1711 
   1712 		wm_set_dma_addr(&rxd->wrx_addr,
   1713 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1714 		rxd->wrx_len = 0;
   1715 		rxd->wrx_cksum = 0;
   1716 		rxd->wrx_status = 0;
   1717 		rxd->wrx_errors = 0;
   1718 		rxd->wrx_special = 0;
   1719 	}
   1720 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1721 
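	/* Pass the descriptor to the hardware by advancing the receive
	 * descriptor tail (RDT) register to this slot. */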
   1722 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1723 }
   1724 
   1725 /*
   1726  * Device driver interface functions and commonly used functions.
   1727  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1728  */
   1729 
   1730 /* Lookup supported device table */
   1731 static const struct wm_product *
   1732 wm_lookup(const struct pci_attach_args *pa)
   1733 {
   1734 	const struct wm_product *wmp;
   1735 
   1736 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1737 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1738 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1739 			return wmp;
   1740 	}
   1741 	return NULL;
   1742 }
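
/*
 * The scan above relies on the sentinel entry with a NULL wmp_name at
 * the end of wm_products to terminate.
 */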
   1743 
   1744 /* The match function (ca_match) */
   1745 static int
   1746 wm_match(device_t parent, cfdata_t cf, void *aux)
   1747 {
   1748 	struct pci_attach_args *pa = aux;
   1749 
   1750 	if (wm_lookup(pa) != NULL)
   1751 		return 1;
   1752 
   1753 	return 0;
   1754 }
   1755 
   1756 /* The attach function (ca_attach) */
   1757 static void
   1758 wm_attach(device_t parent, device_t self, void *aux)
   1759 {
   1760 	struct wm_softc *sc = device_private(self);
   1761 	struct pci_attach_args *pa = aux;
   1762 	prop_dictionary_t dict;
   1763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1764 	pci_chipset_tag_t pc = pa->pa_pc;
   1765 	int counts[PCI_INTR_TYPE_SIZE];
   1766 	pci_intr_type_t max_type;
   1767 	const char *eetype, *xname;
   1768 	bus_space_tag_t memt;
   1769 	bus_space_handle_t memh;
   1770 	bus_size_t memsize;
   1771 	int memh_valid;
   1772 	int i, error;
   1773 	const struct wm_product *wmp;
   1774 	prop_data_t ea;
   1775 	prop_number_t pn;
   1776 	uint8_t enaddr[ETHER_ADDR_LEN];
   1777 	char buf[256];
   1778 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1779 	pcireg_t preg, memtype;
   1780 	uint16_t eeprom_data, apme_mask;
   1781 	bool force_clear_smbi;
   1782 	uint32_t link_mode;
   1783 	uint32_t reg;
   1784 
   1785 	sc->sc_dev = self;
   1786 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1787 	sc->sc_core_stopping = false;
   1788 
   1789 	wmp = wm_lookup(pa);
   1790 #ifdef DIAGNOSTIC
   1791 	if (wmp == NULL) {
   1792 		printf("\n");
   1793 		panic("wm_attach: impossible");
   1794 	}
   1795 #endif
   1796 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1797 
   1798 	sc->sc_pc = pa->pa_pc;
   1799 	sc->sc_pcitag = pa->pa_tag;
   1800 
   1801 	if (pci_dma64_available(pa))
   1802 		sc->sc_dmat = pa->pa_dmat64;
   1803 	else
   1804 		sc->sc_dmat = pa->pa_dmat;
   1805 
   1806 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1807 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1808 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1809 
   1810 	sc->sc_type = wmp->wmp_type;
   1811 
   1812 	/* Set default function pointers */
   1813 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1814 	sc->phy.release = sc->nvm.release = wm_put_null;
   1815 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1816 
   1817 	if (sc->sc_type < WM_T_82543) {
   1818 		if (sc->sc_rev < 2) {
   1819 			aprint_error_dev(sc->sc_dev,
   1820 			    "i82542 must be at least rev. 2\n");
   1821 			return;
   1822 		}
   1823 		if (sc->sc_rev < 3)
   1824 			sc->sc_type = WM_T_82542_2_0;
   1825 	}
   1826 
   1827 	/*
   1828 	 * Disable MSI for Errata:
   1829 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1830 	 *
   1831 	 *  82544: Errata 25
   1832 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1833 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1834 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1835 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1836 	 *
   1837 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1838 	 *
   1839 	 *  82571 & 82572: Errata 63
   1840 	 */
   1841 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1842 	    || (sc->sc_type == WM_T_82572))
   1843 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1844 
   1845 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1846 	    || (sc->sc_type == WM_T_82580)
   1847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1848 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1849 		sc->sc_flags |= WM_F_NEWQUEUE;
   1850 
   1851 	/* Set device properties (mactype) */
   1852 	dict = device_properties(sc->sc_dev);
   1853 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1854 
   1855 	/*
    1856 	 * Map the device.  All devices support memory-mapped access,
   1857 	 * and it is really required for normal operation.
   1858 	 */
   1859 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1860 	switch (memtype) {
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1862 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1863 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1864 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1865 		break;
   1866 	default:
   1867 		memh_valid = 0;
   1868 		break;
   1869 	}
   1870 
   1871 	if (memh_valid) {
   1872 		sc->sc_st = memt;
   1873 		sc->sc_sh = memh;
   1874 		sc->sc_ss = memsize;
   1875 	} else {
   1876 		aprint_error_dev(sc->sc_dev,
   1877 		    "unable to map device registers\n");
   1878 		return;
   1879 	}
   1880 
   1881 	/*
   1882 	 * In addition, i82544 and later support I/O mapped indirect
   1883 	 * register access.  It is not desirable (nor supported in
   1884 	 * this driver) to use it for normal operation, though it is
   1885 	 * required to work around bugs in some chip versions.
   1886 	 */
   1887 	if (sc->sc_type >= WM_T_82544) {
   1888 		/* First we have to find the I/O BAR. */
   1889 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1890 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1891 			if (memtype == PCI_MAPREG_TYPE_IO)
   1892 				break;
   1893 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1894 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1895 				i += 4;	/* skip high bits, too */
   1896 		}
   1897 		if (i < PCI_MAPREG_END) {
    1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1900 			 * That's no problem, because newer chips don't have
    1901 			 * this bug.
   1902 			 * bug.
   1903 			 *
   1904 			 * The i8254x doesn't apparently respond when the
   1905 			 * I/O BAR is 0, which looks somewhat like it's not
   1906 			 * been configured.
   1907 			 */
   1908 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1909 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1910 				aprint_error_dev(sc->sc_dev,
   1911 				    "WARNING: I/O BAR at zero.\n");
   1912 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1913 					0, &sc->sc_iot, &sc->sc_ioh,
   1914 					NULL, &sc->sc_ios) == 0) {
   1915 				sc->sc_flags |= WM_F_IOH_VALID;
   1916 			} else
   1917 				aprint_error_dev(sc->sc_dev,
   1918 				    "WARNING: unable to map I/O space\n");
   1919 		}
   1920 
   1921 	}
   1922 
   1923 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1924 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1925 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1926 	if (sc->sc_type < WM_T_82542_2_1)
   1927 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1928 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1929 
   1930 	/* Power up chip */
   1931 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1932 	    && error != EOPNOTSUPP) {
   1933 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1934 		return;
   1935 	}
   1936 
   1937 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1938 	/*
    1939 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    1940 	 * resources.
   1941 	 */
   1942 	if (sc->sc_nqueues > 1) {
   1943 		max_type = PCI_INTR_TYPE_MSIX;
   1944 		/*
    1945 		 * The 82583 has an MSI-X capability in the PCI configuration
    1946 		 * space but doesn't actually support it; at least the
    1947 		 * documentation says nothing about MSI-X.
   1948 		 */
   1949 		counts[PCI_INTR_TYPE_MSIX]
   1950 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1951 	} else {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
   1956 	/* Allocation settings */
   1957 	counts[PCI_INTR_TYPE_MSI] = 1;
   1958 	counts[PCI_INTR_TYPE_INTX] = 1;
   1959 	/* overridden by disable flags */
   1960 	if (wm_disable_msi != 0) {
   1961 		counts[PCI_INTR_TYPE_MSI] = 0;
   1962 		if (wm_disable_msix != 0) {
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1965 		}
   1966 	} else if (wm_disable_msix != 0) {
   1967 		max_type = PCI_INTR_TYPE_MSI;
   1968 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1969 	}
   1970 
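	/*
	 * The allocation below falls back in stages: if MSI-X setup
	 * fails we release the vectors and retry with MSI, and if MSI
	 * setup fails we retry once more with INTx.
	 */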
   1971 alloc_retry:
   1972 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1973 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1974 		return;
   1975 	}
   1976 
   1977 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1978 		error = wm_setup_msix(sc);
   1979 		if (error) {
   1980 			pci_intr_release(pc, sc->sc_intrs,
   1981 			    counts[PCI_INTR_TYPE_MSIX]);
   1982 
   1983 			/* Setup for MSI: Disable MSI-X */
   1984 			max_type = PCI_INTR_TYPE_MSI;
   1985 			counts[PCI_INTR_TYPE_MSI] = 1;
   1986 			counts[PCI_INTR_TYPE_INTX] = 1;
   1987 			goto alloc_retry;
   1988 		}
   1989 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1990 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1991 		error = wm_setup_legacy(sc);
   1992 		if (error) {
   1993 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1994 			    counts[PCI_INTR_TYPE_MSI]);
   1995 
   1996 			/* The next try is for INTx: Disable MSI */
   1997 			max_type = PCI_INTR_TYPE_INTX;
   1998 			counts[PCI_INTR_TYPE_INTX] = 1;
   1999 			goto alloc_retry;
   2000 		}
   2001 	} else {
   2002 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2003 		error = wm_setup_legacy(sc);
   2004 		if (error) {
   2005 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2006 			    counts[PCI_INTR_TYPE_INTX]);
   2007 			return;
   2008 		}
   2009 	}
   2010 
   2011 	/*
   2012 	 * Check the function ID (unit number of the chip).
   2013 	 */
   2014 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2015 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2016 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2017 	    || (sc->sc_type == WM_T_82580)
   2018 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2019 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2020 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2021 	else
   2022 		sc->sc_funcid = 0;
   2023 
   2024 	/*
   2025 	 * Determine a few things about the bus we're connected to.
   2026 	 */
   2027 	if (sc->sc_type < WM_T_82543) {
   2028 		/* We don't really know the bus characteristics here. */
   2029 		sc->sc_bus_speed = 33;
   2030 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2031 		/*
   2032 		 * CSA (Communication Streaming Architecture) is about as fast
    2033 		 * as a 32-bit 66MHz PCI bus.
   2034 		 */
   2035 		sc->sc_flags |= WM_F_CSA;
   2036 		sc->sc_bus_speed = 66;
   2037 		aprint_verbose_dev(sc->sc_dev,
   2038 		    "Communication Streaming Architecture\n");
   2039 		if (sc->sc_type == WM_T_82547) {
   2040 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2041 			callout_setfunc(&sc->sc_txfifo_ch,
   2042 			    wm_82547_txfifo_stall, sc);
   2043 			aprint_verbose_dev(sc->sc_dev,
   2044 			    "using 82547 Tx FIFO stall work-around\n");
   2045 		}
   2046 	} else if (sc->sc_type >= WM_T_82571) {
   2047 		sc->sc_flags |= WM_F_PCIE;
   2048 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2049 		    && (sc->sc_type != WM_T_ICH10)
   2050 		    && (sc->sc_type != WM_T_PCH)
   2051 		    && (sc->sc_type != WM_T_PCH2)
   2052 		    && (sc->sc_type != WM_T_PCH_LPT)
   2053 		    && (sc->sc_type != WM_T_PCH_SPT)
   2054 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2055 			/* ICH* and PCH* have no PCIe capability registers */
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2058 				NULL) == 0)
   2059 				aprint_error_dev(sc->sc_dev,
   2060 				    "unable to find PCIe capability\n");
   2061 		}
   2062 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2063 	} else {
   2064 		reg = CSR_READ(sc, WMREG_STATUS);
   2065 		if (reg & STATUS_BUS64)
   2066 			sc->sc_flags |= WM_F_BUS64;
   2067 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2068 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2069 
   2070 			sc->sc_flags |= WM_F_PCIX;
   2071 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2072 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2073 				aprint_error_dev(sc->sc_dev,
   2074 				    "unable to find PCIX capability\n");
   2075 			else if (sc->sc_type != WM_T_82545_3 &&
   2076 				 sc->sc_type != WM_T_82546_3) {
   2077 				/*
   2078 				 * Work around a problem caused by the BIOS
   2079 				 * setting the max memory read byte count
   2080 				 * incorrectly.
   2081 				 */
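				/*
				 * Both fields encode the byte count as
				 * 512 << n, e.g. n = 2 means 2048 bytes;
				 * the command field is clamped below to
				 * the maximum the status field advertises.
				 */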
   2082 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2083 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2084 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2085 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2086 
   2087 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2088 				    PCIX_CMD_BYTECNT_SHIFT;
   2089 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2090 				    PCIX_STATUS_MAXB_SHIFT;
   2091 				if (bytecnt > maxb) {
   2092 					aprint_verbose_dev(sc->sc_dev,
   2093 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2094 					    512 << bytecnt, 512 << maxb);
   2095 					pcix_cmd = (pcix_cmd &
   2096 					    ~PCIX_CMD_BYTECNT_MASK) |
   2097 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2098 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2099 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2100 					    pcix_cmd);
   2101 				}
   2102 			}
   2103 		}
   2104 		/*
   2105 		 * The quad port adapter is special; it has a PCIX-PCIX
   2106 		 * bridge on the board, and can run the secondary bus at
   2107 		 * a higher speed.
   2108 		 */
   2109 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2110 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2111 								      : 66;
   2112 		} else if (sc->sc_flags & WM_F_PCIX) {
   2113 			switch (reg & STATUS_PCIXSPD_MASK) {
   2114 			case STATUS_PCIXSPD_50_66:
   2115 				sc->sc_bus_speed = 66;
   2116 				break;
   2117 			case STATUS_PCIXSPD_66_100:
   2118 				sc->sc_bus_speed = 100;
   2119 				break;
   2120 			case STATUS_PCIXSPD_100_133:
   2121 				sc->sc_bus_speed = 133;
   2122 				break;
   2123 			default:
   2124 				aprint_error_dev(sc->sc_dev,
   2125 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2126 				    reg & STATUS_PCIXSPD_MASK);
   2127 				sc->sc_bus_speed = 66;
   2128 				break;
   2129 			}
   2130 		} else
   2131 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2132 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2133 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2134 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2135 	}
   2136 
   2137 	/* clear interesting stat counters */
   2138 	CSR_READ(sc, WMREG_COLC);
   2139 	CSR_READ(sc, WMREG_RXERRC);
   2140 
   2141 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2142 	    || (sc->sc_type >= WM_T_ICH8))
   2143 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2144 	if (sc->sc_type >= WM_T_ICH8)
   2145 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2146 
   2147 	/* Set PHY, NVM mutex related stuff */
   2148 	switch (sc->sc_type) {
   2149 	case WM_T_82542_2_0:
   2150 	case WM_T_82542_2_1:
   2151 	case WM_T_82543:
   2152 	case WM_T_82544:
   2153 		/* Microwire */
   2154 		sc->nvm.read = wm_nvm_read_uwire;
   2155 		sc->sc_nvm_wordsize = 64;
   2156 		sc->sc_nvm_addrbits = 6;
   2157 		break;
   2158 	case WM_T_82540:
   2159 	case WM_T_82545:
   2160 	case WM_T_82545_3:
   2161 	case WM_T_82546:
   2162 	case WM_T_82546_3:
   2163 		/* Microwire */
   2164 		sc->nvm.read = wm_nvm_read_uwire;
   2165 		reg = CSR_READ(sc, WMREG_EECD);
   2166 		if (reg & EECD_EE_SIZE) {
   2167 			sc->sc_nvm_wordsize = 256;
   2168 			sc->sc_nvm_addrbits = 8;
   2169 		} else {
   2170 			sc->sc_nvm_wordsize = 64;
   2171 			sc->sc_nvm_addrbits = 6;
   2172 		}
   2173 		sc->sc_flags |= WM_F_LOCK_EECD;
   2174 		sc->nvm.acquire = wm_get_eecd;
   2175 		sc->nvm.release = wm_put_eecd;
   2176 		break;
   2177 	case WM_T_82541:
   2178 	case WM_T_82541_2:
   2179 	case WM_T_82547:
   2180 	case WM_T_82547_2:
   2181 		reg = CSR_READ(sc, WMREG_EECD);
   2182 		/*
    2183 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2184 		 * 8254[17], so set the flags and functions before calling it.
   2185 		 */
   2186 		sc->sc_flags |= WM_F_LOCK_EECD;
   2187 		sc->nvm.acquire = wm_get_eecd;
   2188 		sc->nvm.release = wm_put_eecd;
   2189 		if (reg & EECD_EE_TYPE) {
   2190 			/* SPI */
   2191 			sc->nvm.read = wm_nvm_read_spi;
   2192 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2193 			wm_nvm_set_addrbits_size_eecd(sc);
   2194 		} else {
   2195 			/* Microwire */
   2196 			sc->nvm.read = wm_nvm_read_uwire;
   2197 			if ((reg & EECD_EE_ABITS) != 0) {
   2198 				sc->sc_nvm_wordsize = 256;
   2199 				sc->sc_nvm_addrbits = 8;
   2200 			} else {
   2201 				sc->sc_nvm_wordsize = 64;
   2202 				sc->sc_nvm_addrbits = 6;
   2203 			}
   2204 		}
   2205 		break;
   2206 	case WM_T_82571:
   2207 	case WM_T_82572:
   2208 		/* SPI */
   2209 		sc->nvm.read = wm_nvm_read_eerd;
    2210 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		sc->phy.acquire = wm_get_swsm_semaphore;
   2214 		sc->phy.release = wm_put_swsm_semaphore;
   2215 		sc->nvm.acquire = wm_get_nvm_82571;
   2216 		sc->nvm.release = wm_put_nvm_82571;
   2217 		break;
   2218 	case WM_T_82573:
   2219 	case WM_T_82574:
   2220 	case WM_T_82583:
   2221 		sc->nvm.read = wm_nvm_read_eerd;
    2222 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		if (sc->sc_type == WM_T_82573) {
   2224 			sc->phy.acquire = wm_get_swsm_semaphore;
   2225 			sc->phy.release = wm_put_swsm_semaphore;
   2226 			sc->nvm.acquire = wm_get_nvm_82571;
   2227 			sc->nvm.release = wm_put_nvm_82571;
   2228 		} else {
   2229 			/* Both PHY and NVM use the same semaphore. */
   2230 			sc->phy.acquire = sc->nvm.acquire
   2231 			    = wm_get_swfwhw_semaphore;
   2232 			sc->phy.release = sc->nvm.release
   2233 			    = wm_put_swfwhw_semaphore;
   2234 		}
   2235 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2236 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2237 			sc->sc_nvm_wordsize = 2048;
   2238 		} else {
   2239 			/* SPI */
   2240 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2241 			wm_nvm_set_addrbits_size_eecd(sc);
   2242 		}
   2243 		break;
   2244 	case WM_T_82575:
   2245 	case WM_T_82576:
   2246 	case WM_T_82580:
   2247 	case WM_T_I350:
   2248 	case WM_T_I354:
   2249 	case WM_T_80003:
   2250 		/* SPI */
   2251 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2252 		wm_nvm_set_addrbits_size_eecd(sc);
   2253 		if ((sc->sc_type == WM_T_80003)
   2254 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2255 			sc->nvm.read = wm_nvm_read_eerd;
   2256 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2257 		} else {
   2258 			sc->nvm.read = wm_nvm_read_spi;
   2259 			sc->sc_flags |= WM_F_LOCK_EECD;
   2260 		}
   2261 		sc->phy.acquire = wm_get_phy_82575;
   2262 		sc->phy.release = wm_put_phy_82575;
   2263 		sc->nvm.acquire = wm_get_nvm_80003;
   2264 		sc->nvm.release = wm_put_nvm_80003;
   2265 		break;
   2266 	case WM_T_ICH8:
   2267 	case WM_T_ICH9:
   2268 	case WM_T_ICH10:
   2269 	case WM_T_PCH:
   2270 	case WM_T_PCH2:
   2271 	case WM_T_PCH_LPT:
   2272 		sc->nvm.read = wm_nvm_read_ich8;
   2273 		/* FLASH */
   2274 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2275 		sc->sc_nvm_wordsize = 2048;
   2276 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2277 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2278 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2279 			aprint_error_dev(sc->sc_dev,
   2280 			    "can't map FLASH registers\n");
   2281 			goto out;
   2282 		}
   2283 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2284 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2285 		    ICH_FLASH_SECTOR_SIZE;
   2286 		sc->sc_ich8_flash_bank_size =
   2287 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2288 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2289 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2290 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
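		/*
		 * Worked example, assuming the usual 4 KB flash sector:
		 * base = 0 and limit = 0x1f in GFPREG describe 32 sectors,
		 * i.e. 128 KB; spread over two banks and counted in 16-bit
		 * words that is 128 KB / (2 * 2) = 32768 words per bank.
		 */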
   2291 		sc->sc_flashreg_offset = 0;
   2292 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2293 		sc->phy.release = wm_put_swflag_ich8lan;
   2294 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2295 		sc->nvm.release = wm_put_nvm_ich8lan;
   2296 		break;
   2297 	case WM_T_PCH_SPT:
   2298 	case WM_T_PCH_CNP:
   2299 		sc->nvm.read = wm_nvm_read_spt;
   2300 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2301 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2302 		sc->sc_flasht = sc->sc_st;
   2303 		sc->sc_flashh = sc->sc_sh;
   2304 		sc->sc_ich8_flash_base = 0;
   2305 		sc->sc_nvm_wordsize =
   2306 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2307 		    * NVM_SIZE_MULTIPLIER;
    2308 		/* That is the size in bytes; we want words */
   2309 		sc->sc_nvm_wordsize /= 2;
   2310 		/* Assume 2 banks */
   2311 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
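		/*
		 * Example: a strap field value of 7 means
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes of NVM, i.e.
		 * 4 * NVM_SIZE_MULTIPLIER 16-bit words, of which half
		 * belongs to each of the two assumed banks.
		 */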
   2312 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2313 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2314 		sc->phy.release = wm_put_swflag_ich8lan;
   2315 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2316 		sc->nvm.release = wm_put_nvm_ich8lan;
   2317 		break;
   2318 	case WM_T_I210:
   2319 	case WM_T_I211:
    2320 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2321 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2322 		if (wm_nvm_flash_presence_i210(sc)) {
   2323 			sc->nvm.read = wm_nvm_read_eerd;
   2324 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2325 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2326 			wm_nvm_set_addrbits_size_eecd(sc);
   2327 		} else {
   2328 			sc->nvm.read = wm_nvm_read_invm;
   2329 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2330 			sc->sc_nvm_wordsize = INVM_SIZE;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	default:
   2338 		break;
   2339 	}
   2340 
   2341 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2342 	switch (sc->sc_type) {
   2343 	case WM_T_82571:
   2344 	case WM_T_82572:
   2345 		reg = CSR_READ(sc, WMREG_SWSM2);
   2346 		if ((reg & SWSM2_LOCK) == 0) {
   2347 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2348 			force_clear_smbi = true;
   2349 		} else
   2350 			force_clear_smbi = false;
   2351 		break;
   2352 	case WM_T_82573:
   2353 	case WM_T_82574:
   2354 	case WM_T_82583:
   2355 		force_clear_smbi = true;
   2356 		break;
   2357 	default:
   2358 		force_clear_smbi = false;
   2359 		break;
   2360 	}
   2361 	if (force_clear_smbi) {
   2362 		reg = CSR_READ(sc, WMREG_SWSM);
   2363 		if ((reg & SWSM_SMBI) != 0)
   2364 			aprint_error_dev(sc->sc_dev,
   2365 			    "Please update the Bootagent\n");
   2366 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2367 	}
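
	/*
	 * A stale SWSM_SMBI bit left set by old boot firmware would get
	 * in the way of the first semaphore acquisition, hence the
	 * forced clear (and the nag about the Bootagent) above.
	 */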
   2368 
   2369 	/*
    2370 	 * Defer printing the EEPROM type until after verifying the checksum.
   2371 	 * This allows the EEPROM type to be printed correctly in the case
   2372 	 * that no EEPROM is attached.
   2373 	 */
   2374 	/*
   2375 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2376 	 * this for later, so we can fail future reads from the EEPROM.
   2377 	 */
   2378 	if (wm_nvm_validate_checksum(sc)) {
   2379 		/*
    2380 		 * Check again, because some PCI-e parts fail the first
    2381 		 * check due to the link being in a sleep state.
   2382 		 */
   2383 		if (wm_nvm_validate_checksum(sc))
   2384 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2385 	}
   2386 
   2387 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2388 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2389 	else {
   2390 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2391 		    sc->sc_nvm_wordsize);
   2392 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2393 			aprint_verbose("iNVM");
   2394 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2395 			aprint_verbose("FLASH(HW)");
   2396 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2397 			aprint_verbose("FLASH");
   2398 		else {
   2399 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2400 				eetype = "SPI";
   2401 			else
   2402 				eetype = "MicroWire";
   2403 			aprint_verbose("(%d address bits) %s EEPROM",
   2404 			    sc->sc_nvm_addrbits, eetype);
   2405 		}
   2406 	}
   2407 	wm_nvm_version(sc);
   2408 	aprint_verbose("\n");
   2409 
   2410 	/*
    2411 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
   2412 	 * incorrect.
   2413 	 */
   2414 	wm_gmii_setup_phytype(sc, 0, 0);
   2415 
   2416 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2417 	switch (sc->sc_type) {
   2418 	case WM_T_ICH8:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH:
   2422 	case WM_T_PCH2:
   2423 	case WM_T_PCH_LPT:
   2424 	case WM_T_PCH_SPT:
   2425 	case WM_T_PCH_CNP:
   2426 		apme_mask = WUC_APME;
   2427 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2428 		if ((eeprom_data & apme_mask) != 0)
   2429 			sc->sc_flags |= WM_F_WOL;
   2430 		break;
   2431 	default:
   2432 		break;
   2433 	}
   2434 
   2435 	/* Reset the chip to a known state. */
   2436 	wm_reset(sc);
   2437 
   2438 	/*
   2439 	 * Check for I21[01] PLL workaround.
   2440 	 *
   2441 	 * Three cases:
   2442 	 * a) Chip is I211.
   2443 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2444 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2445 	 */
   2446 	if (sc->sc_type == WM_T_I211)
   2447 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2448 	if (sc->sc_type == WM_T_I210) {
   2449 		if (!wm_nvm_flash_presence_i210(sc))
   2450 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2451 		else if ((sc->sc_nvm_ver_major < 3)
   2452 		    || ((sc->sc_nvm_ver_major == 3)
   2453 			&& (sc->sc_nvm_ver_minor < 25))) {
   2454 			aprint_verbose_dev(sc->sc_dev,
   2455 			    "ROM image version %d.%d is older than 3.25\n",
   2456 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2457 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2458 		}
   2459 	}
   2460 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2461 		wm_pll_workaround_i210(sc);
   2462 
   2463 	wm_get_wakeup(sc);
   2464 
   2465 	/* Non-AMT based hardware can now take control from firmware */
   2466 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2467 		wm_get_hw_control(sc);
   2468 
   2469 	/*
   2470 	 * Read the Ethernet address from the EEPROM, if not first found
   2471 	 * in device properties.
   2472 	 */
   2473 	ea = prop_dictionary_get(dict, "mac-address");
   2474 	if (ea != NULL) {
   2475 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2476 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2477 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2478 	} else {
   2479 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2480 			aprint_error_dev(sc->sc_dev,
   2481 			    "unable to read Ethernet address\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2487 	    ether_sprintf(enaddr));
   2488 
   2489 	/*
   2490 	 * Read the config info from the EEPROM, and set up various
   2491 	 * bits in the control registers based on their contents.
   2492 	 */
   2493 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2494 	if (pn != NULL) {
   2495 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2496 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2497 	} else {
   2498 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2499 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2500 			goto out;
   2501 		}
   2502 	}
   2503 
   2504 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2505 	if (pn != NULL) {
   2506 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2507 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2508 	} else {
   2509 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2510 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2511 			goto out;
   2512 		}
   2513 	}
   2514 
   2515 	/* check for WM_F_WOL */
   2516 	switch (sc->sc_type) {
   2517 	case WM_T_82542_2_0:
   2518 	case WM_T_82542_2_1:
   2519 	case WM_T_82543:
   2520 		/* dummy? */
   2521 		eeprom_data = 0;
   2522 		apme_mask = NVM_CFG3_APME;
   2523 		break;
   2524 	case WM_T_82544:
   2525 		apme_mask = NVM_CFG2_82544_APM_EN;
   2526 		eeprom_data = cfg2;
   2527 		break;
   2528 	case WM_T_82546:
   2529 	case WM_T_82546_3:
   2530 	case WM_T_82571:
   2531 	case WM_T_82572:
   2532 	case WM_T_82573:
   2533 	case WM_T_82574:
   2534 	case WM_T_82583:
   2535 	case WM_T_80003:
   2536 	case WM_T_82575:
   2537 	case WM_T_82576:
   2538 		apme_mask = NVM_CFG3_APME;
   2539 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2540 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2541 		break;
   2542 	case WM_T_82580:
   2543 	case WM_T_I350:
   2544 	case WM_T_I354:
   2545 	case WM_T_I210:
   2546 	case WM_T_I211:
   2547 		apme_mask = NVM_CFG3_APME;
   2548 		wm_nvm_read(sc,
   2549 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2550 		    1, &eeprom_data);
   2551 		break;
   2552 	case WM_T_ICH8:
   2553 	case WM_T_ICH9:
   2554 	case WM_T_ICH10:
   2555 	case WM_T_PCH:
   2556 	case WM_T_PCH2:
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 	case WM_T_PCH_CNP:
    2560 		/* Already checked before wm_reset() */
   2561 		apme_mask = eeprom_data = 0;
   2562 		break;
   2563 	default: /* XXX 82540 */
   2564 		apme_mask = NVM_CFG3_APME;
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2566 		break;
   2567 	}
   2568 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2569 	if ((eeprom_data & apme_mask) != 0)
   2570 		sc->sc_flags |= WM_F_WOL;
   2571 
   2572 	/*
    2573 	 * We have the EEPROM settings; now apply the special cases
    2574 	 * where the EEPROM may be wrong or the board simply won't
    2575 	 * support wake-on-LAN on a particular port.
   2576 	 */
   2577 	switch (sc->sc_pcidevid) {
   2578 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2579 		sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2582 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2583 		/* Wake events only supported on port A for dual fiber
   2584 		 * regardless of eeprom setting */
   2585 		if (sc->sc_funcid == 1)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2589 		/* If quad port adapter, disable WoL on all but port A */
   2590 		if (sc->sc_funcid != 0)
   2591 			sc->sc_flags &= ~WM_F_WOL;
   2592 		break;
   2593 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2594 		/* Wake events only supported on port A for dual fiber
   2595 		 * regardless of eeprom setting */
   2596 		if (sc->sc_funcid == 1)
   2597 			sc->sc_flags &= ~WM_F_WOL;
   2598 		break;
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2600 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2601 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2602 		/* If quad port adapter, disable WoL on all but port A */
   2603 		if (sc->sc_funcid != 0)
   2604 			sc->sc_flags &= ~WM_F_WOL;
   2605 		break;
   2606 	}
   2607 
   2608 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2609 		/* Check NVM for autonegotiation */
   2610 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2611 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2612 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2613 		}
   2614 	}
   2615 
   2616 	/*
    2617 	 * XXX Need special handling for some multiple-port cards
    2618 	 * to disable a particular port.
   2619 	 */
   2620 
   2621 	if (sc->sc_type >= WM_T_82544) {
   2622 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2623 		if (pn != NULL) {
   2624 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2625 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2626 		} else {
   2627 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2628 				aprint_error_dev(sc->sc_dev,
   2629 				    "unable to read SWDPIN\n");
   2630 				goto out;
   2631 			}
   2632 		}
   2633 	}
   2634 
   2635 	if (cfg1 & NVM_CFG1_ILOS)
   2636 		sc->sc_ctrl |= CTRL_ILOS;
   2637 
   2638 	/*
   2639 	 * XXX
    2640 	 * This code isn't correct, because pins 2 and 3 are located
    2641 	 * at different positions on newer chips. Check all the datasheets.
    2642 	 *
    2643 	 * Until that is resolved, only do this for chips up to the 82580.
   2644 	 */
   2645 	if (sc->sc_type <= WM_T_82580) {
   2646 		if (sc->sc_type >= WM_T_82544) {
   2647 			sc->sc_ctrl |=
   2648 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2649 			    CTRL_SWDPIO_SHIFT;
   2650 			sc->sc_ctrl |=
   2651 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2652 			    CTRL_SWDPINS_SHIFT;
   2653 		} else {
   2654 			sc->sc_ctrl |=
   2655 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2656 			    CTRL_SWDPIO_SHIFT;
   2657 		}
   2658 	}
   2659 
   2660 	/* XXX For other than 82580? */
   2661 	if (sc->sc_type == WM_T_82580) {
   2662 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2663 		if (nvmword & __BIT(13))
   2664 			sc->sc_ctrl |= CTRL_ILOS;
   2665 	}
   2666 
   2667 #if 0
   2668 	if (sc->sc_type >= WM_T_82544) {
   2669 		if (cfg1 & NVM_CFG1_IPS0)
   2670 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2671 		if (cfg1 & NVM_CFG1_IPS1)
   2672 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2673 		sc->sc_ctrl_ext |=
   2674 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2675 		    CTRL_EXT_SWDPIO_SHIFT;
   2676 		sc->sc_ctrl_ext |=
   2677 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2678 		    CTRL_EXT_SWDPINS_SHIFT;
   2679 	} else {
   2680 		sc->sc_ctrl_ext |=
   2681 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2682 		    CTRL_EXT_SWDPIO_SHIFT;
   2683 	}
   2684 #endif
   2685 
   2686 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2687 #if 0
   2688 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2689 #endif
   2690 
   2691 	if (sc->sc_type == WM_T_PCH) {
   2692 		uint16_t val;
   2693 
   2694 		/* Save the NVM K1 bit setting */
   2695 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2696 
   2697 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2698 			sc->sc_nvm_k1_enabled = 1;
   2699 		else
   2700 			sc->sc_nvm_k1_enabled = 0;
   2701 	}
   2702 
    2703 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2704 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2705 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2706 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2707 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2708 	    || sc->sc_type == WM_T_82573
   2709 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2710 		/* Copper only */
   2711 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2712 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2713 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2714 	    || (sc->sc_type == WM_T_I211)) {
   2715 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2716 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2717 		switch (link_mode) {
   2718 		case CTRL_EXT_LINK_MODE_1000KX:
   2719 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2720 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2721 			break;
   2722 		case CTRL_EXT_LINK_MODE_SGMII:
   2723 			if (wm_sgmii_uses_mdio(sc)) {
   2724 				aprint_verbose_dev(sc->sc_dev,
   2725 				    "SGMII(MDIO)\n");
   2726 				sc->sc_flags |= WM_F_SGMII;
   2727 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2728 				break;
   2729 			}
   2730 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2731 			/*FALLTHROUGH*/
   2732 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2733 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2734 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2735 				if (link_mode
   2736 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2737 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 					sc->sc_flags |= WM_F_SGMII;
   2739 				} else {
   2740 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2741 					aprint_verbose_dev(sc->sc_dev,
   2742 					    "SERDES\n");
   2743 				}
   2744 				break;
   2745 			}
   2746 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2747 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2748 
   2749 			/* Change current link mode setting */
   2750 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2751 			switch (sc->sc_mediatype) {
   2752 			case WM_MEDIATYPE_COPPER:
   2753 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2754 				break;
   2755 			case WM_MEDIATYPE_SERDES:
   2756 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2757 				break;
   2758 			default:
   2759 				break;
   2760 			}
   2761 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2762 			break;
   2763 		case CTRL_EXT_LINK_MODE_GMII:
   2764 		default:
   2765 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2767 			break;
   2768 		}
   2769 
    2770 		reg &= ~CTRL_EXT_I2C_ENA;
    2771 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2772 			reg |= CTRL_EXT_I2C_ENA;
   2775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2776 	} else if (sc->sc_type < WM_T_82543 ||
   2777 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2778 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2779 			aprint_error_dev(sc->sc_dev,
   2780 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2781 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2782 		}
   2783 	} else {
   2784 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2785 			aprint_error_dev(sc->sc_dev,
   2786 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2787 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2788 		}
   2789 	}
   2790 
   2791 	if (sc->sc_type >= WM_T_PCH2)
   2792 		sc->sc_flags |= WM_F_EEE;
   2793 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2794 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2795 		/* XXX: Need special handling for I354. (not yet) */
   2796 		if (sc->sc_type != WM_T_I354)
   2797 			sc->sc_flags |= WM_F_EEE;
   2798 	}
   2799 
   2800 	/* Set device properties (macflags) */
   2801 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2802 
   2803 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2804 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2805 
   2806 	/* Initialize the media structures accordingly. */
   2807 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2808 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2809 	else
   2810 		wm_tbi_mediainit(sc); /* All others */
   2811 
   2812 	ifp = &sc->sc_ethercom.ec_if;
   2813 	xname = device_xname(sc->sc_dev);
   2814 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2815 	ifp->if_softc = sc;
   2816 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2817 #ifdef WM_MPSAFE
   2818 	ifp->if_extflags = IFEF_MPSAFE;
   2819 #endif
   2820 	ifp->if_ioctl = wm_ioctl;
   2821 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2822 		ifp->if_start = wm_nq_start;
   2823 		/*
    2824 		 * When the number of CPUs is one and the controller can use
    2825 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2826 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2827 		 * other for link status changes.
   2828 		 * In this situation, wm_nq_transmit() is disadvantageous
   2829 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2830 		 */
   2831 		if (wm_is_using_multiqueue(sc))
   2832 			ifp->if_transmit = wm_nq_transmit;
   2833 	} else {
   2834 		ifp->if_start = wm_start;
   2835 		/*
    2836 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2837 		 */
   2838 		if (wm_is_using_multiqueue(sc))
   2839 			ifp->if_transmit = wm_transmit;
   2840 	}
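
	/*
	 * Illustrative sketch (an assumption, not driver code): a
	 * multiqueue if_transmit entry point typically maps the sending
	 * context to a Tx queue and stashes the mbuf in that queue's
	 * pcq(9), roughly:
	 *
	 *	int
	 *	example_transmit(struct ifnet *ifp, struct mbuf *m)
	 *	{
	 *		u_int qid = cpu_index(curcpu()) % nqueues;
	 *
	 *		if (!pcq_put(txq[qid].pcq, m)) {
	 *			m_freem(m);
	 *			return ENOBUFS;
	 *		}
	 *		softint_schedule(txq[qid].si);
	 *		return 0;
	 *	}
	 *
	 * With a single queue the selection and pcq overhead buy nothing,
	 * which is why if_transmit is only set when multiqueue is in use.
	 */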
    2841 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2842 	ifp->if_init = wm_init;
   2843 	ifp->if_stop = wm_stop;
   2844 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2845 	IFQ_SET_READY(&ifp->if_snd);
   2846 
   2847 	/* Check for jumbo frame */
   2848 	switch (sc->sc_type) {
   2849 	case WM_T_82573:
   2850 		/* XXX limited to 9234 if ASPM is disabled */
   2851 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2852 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2853 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2854 		break;
   2855 	case WM_T_82571:
   2856 	case WM_T_82572:
   2857 	case WM_T_82574:
   2858 	case WM_T_82583:
   2859 	case WM_T_82575:
   2860 	case WM_T_82576:
   2861 	case WM_T_82580:
   2862 	case WM_T_I350:
   2863 	case WM_T_I354:
   2864 	case WM_T_I210:
   2865 	case WM_T_I211:
   2866 	case WM_T_80003:
   2867 	case WM_T_ICH9:
   2868 	case WM_T_ICH10:
   2869 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2870 	case WM_T_PCH_LPT:
   2871 	case WM_T_PCH_SPT:
   2872 	case WM_T_PCH_CNP:
   2873 		/* XXX limited to 9234 */
   2874 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2875 		break;
   2876 	case WM_T_PCH:
   2877 		/* XXX limited to 4096 */
   2878 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2879 		break;
   2880 	case WM_T_82542_2_0:
   2881 	case WM_T_82542_2_1:
   2882 	case WM_T_ICH8:
   2883 		/* No support for jumbo frame */
   2884 		break;
   2885 	default:
   2886 		/* ETHER_MAX_LEN_JUMBO */
   2887 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2888 		break;
   2889 	}
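
	/*
	 * To recap the cases above: the 82542 variants and ICH8 have no
	 * jumbo frame support, PCH is limited to 4096 bytes, PCH2 and the
	 * later PCH generations handle up to 9K (9234 bytes), and the
	 * 82573 only advertises jumbo support when the NVM's ASPM bits
	 * are set; all remaining chips take ETHER_MAX_LEN_JUMBO.
	 */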
   2890 
    2891 	/* If we're an i82543 or greater, we can support VLANs. */
   2892 	if (sc->sc_type >= WM_T_82543) {
   2893 		sc->sc_ethercom.ec_capabilities |=
   2894 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2895 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2896 	}
   2897 
   2898 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2899 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2900 
   2901 	/*
    2902 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2903 	 * on i82543 and later.
   2904 	 */
   2905 	if (sc->sc_type >= WM_T_82543) {
   2906 		ifp->if_capabilities |=
   2907 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2908 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2909 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2910 		    IFCAP_CSUM_TCPv6_Tx |
   2911 		    IFCAP_CSUM_UDPv6_Tx;
   2912 	}
   2913 
   2914 	/*
    2915 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2916 	 *
   2917 	 *	82541GI (8086:1076) ... no
   2918 	 *	82572EI (8086:10b9) ... yes
   2919 	 */
   2920 	if (sc->sc_type >= WM_T_82571) {
   2921 		ifp->if_capabilities |=
   2922 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2923 	}
   2924 
   2925 	/*
    2926 	 * If we're an i82544 or greater (except i82547), we can do
   2927 	 * TCP segmentation offload.
   2928 	 */
   2929 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2930 		ifp->if_capabilities |= IFCAP_TSOv4;
   2931 	}
   2932 
   2933 	if (sc->sc_type >= WM_T_82571) {
   2934 		ifp->if_capabilities |= IFCAP_TSOv6;
   2935 	}
   2936 
   2937 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2939 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2940 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2941 
   2942 #ifdef WM_MPSAFE
   2943 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2944 #else
   2945 	sc->sc_core_lock = NULL;
   2946 #endif
   2947 
   2948 	/* Attach the interface. */
   2949 	error = if_initialize(ifp);
   2950 	if (error != 0) {
   2951 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2952 		    error);
   2953 		return; /* Error */
   2954 	}
   2955 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2956 	ether_ifattach(ifp, enaddr);
   2957 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2958 	if_register(ifp);
   2959 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2960 	    RND_FLAG_DEFAULT);
   2961 
   2962 #ifdef WM_EVENT_COUNTERS
   2963 	/* Attach event counters. */
   2964 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2965 	    NULL, xname, "linkintr");
   2966 
   2967 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2968 	    NULL, xname, "tx_xoff");
   2969 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2970 	    NULL, xname, "tx_xon");
   2971 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2972 	    NULL, xname, "rx_xoff");
   2973 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2974 	    NULL, xname, "rx_xon");
   2975 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2976 	    NULL, xname, "rx_macctl");
   2977 #endif /* WM_EVENT_COUNTERS */
   2978 
   2979 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2980 		pmf_class_network_register(self, ifp);
   2981 	else
   2982 		aprint_error_dev(self, "couldn't establish power handler\n");
   2983 
   2984 	sc->sc_flags |= WM_F_ATTACHED;
   2985 out:
   2986 	return;
   2987 }
   2988 
   2989 /* The detach function (ca_detach) */
   2990 static int
   2991 wm_detach(device_t self, int flags __unused)
   2992 {
   2993 	struct wm_softc *sc = device_private(self);
   2994 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2995 	int i;
   2996 
   2997 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2998 		return 0;
   2999 
   3000 	/* Stop the interface. Callouts are stopped in it. */
   3001 	wm_stop(ifp, 1);
   3002 
   3003 	pmf_device_deregister(self);
   3004 
   3005 #ifdef WM_EVENT_COUNTERS
   3006 	evcnt_detach(&sc->sc_ev_linkintr);
   3007 
   3008 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3009 	evcnt_detach(&sc->sc_ev_tx_xon);
   3010 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3011 	evcnt_detach(&sc->sc_ev_rx_xon);
   3012 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3013 #endif /* WM_EVENT_COUNTERS */
   3014 
   3015 	rnd_detach_source(&sc->rnd_source);
   3016 
   3017 	/* Tell the firmware about the release */
   3018 	WM_CORE_LOCK(sc);
   3019 	wm_release_manageability(sc);
   3020 	wm_release_hw_control(sc);
   3021 	wm_enable_wakeup(sc);
   3022 	WM_CORE_UNLOCK(sc);
   3023 
   3024 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3025 
   3026 	/* Delete all remaining media. */
   3027 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3028 
   3029 	ether_ifdetach(ifp);
   3030 	if_detach(ifp);
   3031 	if_percpuq_destroy(sc->sc_ipq);
   3032 
   3033 	/* Unload RX dmamaps and free mbufs */
   3034 	for (i = 0; i < sc->sc_nqueues; i++) {
   3035 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3036 		mutex_enter(rxq->rxq_lock);
   3037 		wm_rxdrain(rxq);
   3038 		mutex_exit(rxq->rxq_lock);
   3039 	}
    3040 	/* All rxq locks must be released before the teardown below. */
   3041 
   3042 	/* Disestablish the interrupt handler */
   3043 	for (i = 0; i < sc->sc_nintrs; i++) {
   3044 		if (sc->sc_ihs[i] != NULL) {
   3045 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3046 			sc->sc_ihs[i] = NULL;
   3047 		}
   3048 	}
   3049 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3050 
   3051 	wm_free_txrx_queues(sc);
   3052 
   3053 	/* Unmap the registers */
   3054 	if (sc->sc_ss) {
   3055 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3056 		sc->sc_ss = 0;
   3057 	}
   3058 	if (sc->sc_ios) {
   3059 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3060 		sc->sc_ios = 0;
   3061 	}
   3062 	if (sc->sc_flashs) {
   3063 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3064 		sc->sc_flashs = 0;
   3065 	}
   3066 
   3067 	if (sc->sc_core_lock)
   3068 		mutex_obj_free(sc->sc_core_lock);
   3069 	if (sc->sc_ich_phymtx)
   3070 		mutex_obj_free(sc->sc_ich_phymtx);
   3071 	if (sc->sc_ich_nvmmtx)
   3072 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3073 
   3074 	return 0;
   3075 }
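
/*
 * Note that the teardown above runs roughly in the reverse order of
 * attach: stop the interface, deregister power management and event
 * counters, detach the random source, hand control back to firmware,
 * detach the PHY and media, detach the interface itself, drain the Rx
 * queues, tear down interrupts, free the queues, and finally unmap the
 * register windows and free the locks.
 */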
   3076 
   3077 static bool
   3078 wm_suspend(device_t self, const pmf_qual_t *qual)
   3079 {
   3080 	struct wm_softc *sc = device_private(self);
   3081 
   3082 	wm_release_manageability(sc);
   3083 	wm_release_hw_control(sc);
   3084 	wm_enable_wakeup(sc);
   3085 
   3086 	return true;
   3087 }
   3088 
   3089 static bool
   3090 wm_resume(device_t self, const pmf_qual_t *qual)
   3091 {
   3092 	struct wm_softc *sc = device_private(self);
   3093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3094 	pcireg_t reg;
   3095 	char buf[256];
   3096 
   3097 	reg = CSR_READ(sc, WMREG_WUS);
   3098 	if (reg != 0) {
   3099 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3100 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3101 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3102 	}
   3103 
   3104 	if (sc->sc_type >= WM_T_PCH2)
   3105 		wm_resume_workarounds_pchlan(sc);
   3106 	if ((ifp->if_flags & IFF_UP) == 0) {
   3107 		wm_reset(sc);
   3108 		/* Non-AMT based hardware can now take control from firmware */
   3109 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3110 			wm_get_hw_control(sc);
   3111 		wm_init_manageability(sc);
   3112 	} else {
   3113 		/*
   3114 		 * We called pmf_class_network_register(), so if_init() is
   3115 		 * automatically called when IFF_UP. wm_reset(),
   3116 		 * wm_get_hw_control() and wm_init_manageability() are called
   3117 		 * via wm_init().
   3118 		 */
   3119 	}
   3120 
   3121 	return true;
   3122 }
   3123 
   3124 /*
   3125  * wm_watchdog:		[ifnet interface function]
   3126  *
   3127  *	Watchdog timer handler.
   3128  */
   3129 static void
   3130 wm_watchdog(struct ifnet *ifp)
   3131 {
   3132 	int qid;
   3133 	struct wm_softc *sc = ifp->if_softc;
    3134 	uint16_t hang_queue = 0; /* The max number of queues in wm(4) is 16 (82576). */
   3135 
   3136 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3137 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3138 
   3139 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3140 	}
   3141 
    3142 	/* If any queue hung, reset the interface. */
   3143 	if (hang_queue != 0) {
   3144 		(void)wm_init(ifp);
   3145 
   3146 		/*
    3147 		 * Some upper layer processing (e.g. ALTQ or a one-CPU
    3148 		 * system) still calls ifp->if_start() directly.
   3149 		 */
   3150 		/* Try to get more packets going. */
   3151 		ifp->if_start(ifp);
   3152 	}
   3153 }
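
/*
 * Worked example of the hang bitmask: with three queues where only
 * queue 1 has stalled, wm_watchdog_txq() sets __BIT(1), so hang_queue
 * ends up 0x0002.  Any nonzero value reinitializes the whole
 * interface; the mask itself only records which queues were stuck.
 */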
   3154 
   3155 
   3156 static void
   3157 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3158 {
   3159 
   3160 	mutex_enter(txq->txq_lock);
   3161 	if (txq->txq_sending &&
   3162 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3163 		wm_watchdog_txq_locked(ifp, txq, hang);
   3164 
   3165 	mutex_exit(txq->txq_lock);
   3166 }
   3167 
   3168 static void
   3169 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3170     uint16_t *hang)
   3171 {
   3172 	struct wm_softc *sc = ifp->if_softc;
   3173 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3174 
   3175 	KASSERT(mutex_owned(txq->txq_lock));
   3176 
   3177 	/*
   3178 	 * Since we're using delayed interrupts, sweep up
   3179 	 * before we report an error.
   3180 	 */
   3181 	wm_txeof(txq, UINT_MAX);
   3182 
   3183 	if (txq->txq_sending)
   3184 		*hang |= __BIT(wmq->wmq_id);
   3185 
   3186 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3187 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3188 		    device_xname(sc->sc_dev));
   3189 	} else {
   3190 #ifdef WM_DEBUG
   3191 		int i, j;
   3192 		struct wm_txsoft *txs;
   3193 #endif
   3194 		log(LOG_ERR,
   3195 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3196 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3197 		    txq->txq_next);
   3198 		ifp->if_oerrors++;
   3199 #ifdef WM_DEBUG
   3200 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3201 		    i = WM_NEXTTXS(txq, i)) {
   3202 			txs = &txq->txq_soft[i];
   3203 			printf("txs %d tx %d -> %d\n",
   3204 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3205 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3206 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3207 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3208 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3209 					printf("\t %#08x%08x\n",
   3210 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3211 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3212 				} else {
   3213 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3214 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3215 					    txq->txq_descs[j].wtx_addr.wa_low);
   3216 					printf("\t %#04x%02x%02x%08x\n",
   3217 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3218 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3219 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3220 					    txq->txq_descs[j].wtx_cmdlen);
   3221 				}
   3222 				if (j == txs->txs_lastdesc)
   3223 					break;
   3224 			}
   3225 		}
   3226 #endif
   3227 	}
   3228 }
   3229 
   3230 /*
   3231  * wm_tick:
   3232  *
   3233  *	One second timer, used to check link status, sweep up
   3234  *	completed transmit jobs, etc.
   3235  */
   3236 static void
   3237 wm_tick(void *arg)
   3238 {
   3239 	struct wm_softc *sc = arg;
   3240 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3241 #ifndef WM_MPSAFE
   3242 	int s = splnet();
   3243 #endif
   3244 
   3245 	WM_CORE_LOCK(sc);
   3246 
   3247 	if (sc->sc_core_stopping) {
   3248 		WM_CORE_UNLOCK(sc);
   3249 #ifndef WM_MPSAFE
   3250 		splx(s);
   3251 #endif
   3252 		return;
   3253 	}
   3254 
   3255 	if (sc->sc_type >= WM_T_82542_2_1) {
   3256 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3257 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3258 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3259 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3260 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3261 	}
   3262 
   3263 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3264 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3265 	    + CSR_READ(sc, WMREG_CRCERRS)
   3266 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3267 	    + CSR_READ(sc, WMREG_SYMERRC)
   3268 	    + CSR_READ(sc, WMREG_RXERRC)
   3269 	    + CSR_READ(sc, WMREG_SEC)
   3270 	    + CSR_READ(sc, WMREG_CEXTERR)
   3271 	    + CSR_READ(sc, WMREG_RLEC);
   3272 	/*
    3273 	 * WMREG_RNBC is incremented when no buffers are available in
    3274 	 * host memory. It does not count dropped packets, because the
    3275 	 * Ethernet controller can still receive packets in that case
    3276 	 * as long as there is space in the PHY's FIFO.
    3277 	 *
    3278 	 * To track WMREG_RNBC, use a dedicated EVCNT of your own
    3279 	 * rather than if_iqdrops.
   3280 	 */
   3281 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3282 
   3283 	if (sc->sc_flags & WM_F_HAS_MII)
   3284 		mii_tick(&sc->sc_mii);
   3285 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3286 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3287 		wm_serdes_tick(sc);
   3288 	else
   3289 		wm_tbi_tick(sc);
   3290 
   3291 	WM_CORE_UNLOCK(sc);
   3292 
   3293 	wm_watchdog(ifp);
   3294 
   3295 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3296 }
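
/*
 * A minimal sketch (an assumption, not a copy of the driver's init
 * path) of the self-rearming callout pattern wm_tick() relies on:
 *
 *	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
 *	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
 *
 * The handler re-arms itself with callout_reset() as its last action,
 * so the statistics and link sweep runs once per second until
 * wm_stop() halts the callout.
 */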
   3297 
   3298 static int
   3299 wm_ifflags_cb(struct ethercom *ec)
   3300 {
   3301 	struct ifnet *ifp = &ec->ec_if;
   3302 	struct wm_softc *sc = ifp->if_softc;
   3303 	int iffchange, ecchange;
   3304 	bool needreset = false;
   3305 	int rc = 0;
   3306 
   3307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3308 		device_xname(sc->sc_dev), __func__));
   3309 
   3310 	WM_CORE_LOCK(sc);
   3311 
   3312 	/*
   3313 	 * Check for if_flags.
   3314 	 * Main usage is to prevent linkdown when opening bpf.
   3315 	 */
   3316 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3317 	sc->sc_if_flags = ifp->if_flags;
   3318 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3319 		needreset = true;
   3320 		goto ec;
   3321 	}
   3322 
   3323 	/* iff related updates */
   3324 	if ((iffchange & IFF_PROMISC) != 0)
   3325 		wm_set_filter(sc);
   3326 
   3327 	wm_set_vlan(sc);
   3328 
   3329 ec:
   3330 	/* Check for ec_capenable. */
   3331 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3332 	sc->sc_ec_capenable = ec->ec_capenable;
   3333 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3334 		needreset = true;
   3335 		goto out;
   3336 	}
   3337 
   3338 	/* ec related updates */
   3339 	wm_set_eee(sc);
   3340 
   3341 out:
   3342 	if (needreset)
   3343 		rc = ENETRESET;
   3344 	WM_CORE_UNLOCK(sc);
   3345 
   3346 	return rc;
   3347 }
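
/*
 * Returning ENETRESET from the ifflags callback tells the caller in
 * the ethernet layer that the change cannot be applied on the fly and
 * the interface must be fully reinitialized via if_init.  Returning 0
 * means the handler already applied the change itself (the filter and
 * VLAN/EEE updates above).
 */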
   3348 
   3349 /*
   3350  * wm_ioctl:		[ifnet interface function]
   3351  *
   3352  *	Handle control requests from the operator.
   3353  */
   3354 static int
   3355 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3356 {
   3357 	struct wm_softc *sc = ifp->if_softc;
   3358 	struct ifreq *ifr = (struct ifreq *)data;
   3359 	struct ifaddr *ifa = (struct ifaddr *)data;
   3360 	struct sockaddr_dl *sdl;
   3361 	int s, error;
   3362 
   3363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3364 		device_xname(sc->sc_dev), __func__));
   3365 
   3366 #ifndef WM_MPSAFE
   3367 	s = splnet();
   3368 #endif
   3369 	switch (cmd) {
   3370 	case SIOCSIFMEDIA:
   3371 		WM_CORE_LOCK(sc);
   3372 		/* Flow control requires full-duplex mode. */
   3373 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3374 		    (ifr->ifr_media & IFM_FDX) == 0)
   3375 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3376 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3377 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3378 				/* We can do both TXPAUSE and RXPAUSE. */
   3379 				ifr->ifr_media |=
   3380 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3381 			}
   3382 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3383 		}
   3384 		WM_CORE_UNLOCK(sc);
   3385 #ifdef WM_MPSAFE
   3386 		s = splnet();
   3387 #endif
   3388 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3389 #ifdef WM_MPSAFE
   3390 		splx(s);
   3391 #endif
   3392 		break;
   3393 	case SIOCINITIFADDR:
   3394 		WM_CORE_LOCK(sc);
   3395 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3396 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3397 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3398 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3399 			/* Unicast address is the first multicast entry */
   3400 			wm_set_filter(sc);
   3401 			error = 0;
   3402 			WM_CORE_UNLOCK(sc);
   3403 			break;
   3404 		}
   3405 		WM_CORE_UNLOCK(sc);
   3406 		/*FALLTHROUGH*/
   3407 	default:
   3408 #ifdef WM_MPSAFE
   3409 		s = splnet();
   3410 #endif
   3411 		/* It may call wm_start, so unlock here */
   3412 		error = ether_ioctl(ifp, cmd, data);
   3413 #ifdef WM_MPSAFE
   3414 		splx(s);
   3415 #endif
   3416 		if (error != ENETRESET)
   3417 			break;
   3418 
   3419 		error = 0;
   3420 
   3421 		if (cmd == SIOCSIFCAP)
   3422 			error = (*ifp->if_init)(ifp);
   3423 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3424 			;
   3425 		else if (ifp->if_flags & IFF_RUNNING) {
   3426 			/*
   3427 			 * Multicast list has changed; set the hardware filter
   3428 			 * accordingly.
   3429 			 */
   3430 			WM_CORE_LOCK(sc);
   3431 			wm_set_filter(sc);
   3432 			WM_CORE_UNLOCK(sc);
   3433 		}
   3434 		break;
   3435 	}
   3436 
   3437 #ifndef WM_MPSAFE
   3438 	splx(s);
   3439 #endif
   3440 	return error;
   3441 }
   3442 
   3443 /* MAC address related */
   3444 
   3445 /*
    3446  * Get the offset of the MAC address and return it.
    3447  * If an error occurs, offset 0 is used.
   3448  */
   3449 static uint16_t
   3450 wm_check_alt_mac_addr(struct wm_softc *sc)
   3451 {
   3452 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3453 	uint16_t offset = NVM_OFF_MACADDR;
   3454 
   3455 	/* Try to read alternative MAC address pointer */
   3456 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3457 		return 0;
   3458 
    3459 	/* Check whether the pointer is valid. */
   3460 	if ((offset == 0x0000) || (offset == 0xffff))
   3461 		return 0;
   3462 
   3463 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3464 	/*
    3465 	 * Check whether the alternative MAC address is valid. Some
    3466 	 * cards have a non-0xffff pointer but don't actually use an
    3467 	 * alternative MAC address.
    3468 	 *
    3469 	 * To do so, check that the multicast bit of the first octet is clear.
   3470 	 */
   3471 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3472 		if (((myea[0] & 0xff) & 0x01) == 0)
   3473 			return offset; /* Found */
   3474 
   3475 	/* Not found */
   3476 	return 0;
   3477 }
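
/*
 * Worked example of the validity check above: the NVM stores the MAC
 * address low byte first, so for the (hypothetical) station address
 * 00:1b:21:3c:4d:5e the first word read back is 0x1b00.  Its low byte
 * is 0x00, whose LSB (the multicast/group bit) is clear, so the entry
 * is accepted.  A word of 0xffff from erased flash has the LSB set
 * and is rejected.
 */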
   3478 
   3479 static int
   3480 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3481 {
   3482 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3483 	uint16_t offset = NVM_OFF_MACADDR;
   3484 	int do_invert = 0;
   3485 
   3486 	switch (sc->sc_type) {
   3487 	case WM_T_82580:
   3488 	case WM_T_I350:
   3489 	case WM_T_I354:
   3490 		/* EEPROM Top Level Partitioning */
   3491 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3492 		break;
   3493 	case WM_T_82571:
   3494 	case WM_T_82575:
   3495 	case WM_T_82576:
   3496 	case WM_T_80003:
   3497 	case WM_T_I210:
   3498 	case WM_T_I211:
   3499 		offset = wm_check_alt_mac_addr(sc);
   3500 		if (offset == 0)
   3501 			if ((sc->sc_funcid & 0x01) == 1)
   3502 				do_invert = 1;
   3503 		break;
   3504 	default:
   3505 		if ((sc->sc_funcid & 0x01) == 1)
   3506 			do_invert = 1;
   3507 		break;
   3508 	}
   3509 
   3510 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3511 		goto bad;
   3512 
   3513 	enaddr[0] = myea[0] & 0xff;
   3514 	enaddr[1] = myea[0] >> 8;
   3515 	enaddr[2] = myea[1] & 0xff;
   3516 	enaddr[3] = myea[1] >> 8;
   3517 	enaddr[4] = myea[2] & 0xff;
   3518 	enaddr[5] = myea[2] >> 8;
   3519 
   3520 	/*
   3521 	 * Toggle the LSB of the MAC address on the second port
   3522 	 * of some dual port cards.
   3523 	 */
   3524 	if (do_invert != 0)
   3525 		enaddr[5] ^= 1;
   3526 
   3527 	return 0;
   3528 
   3529  bad:
   3530 	return -1;
   3531 }
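
/*
 * Worked example of the unpacking above, with hypothetical NVM words
 * { 0x1b00, 0x3c21, 0x5e4d }: each word contributes its low byte
 * first, yielding 00:1b:21:3c:4d:5e.  If do_invert is set (the second
 * port of some dual port cards), the LSB of the last octet is
 * toggled, giving 00:1b:21:3c:4d:5f.
 */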
   3532 
   3533 /*
   3534  * wm_set_ral:
   3535  *
    3536  *	Set an entry in the receive address list.
   3537  */
   3538 static void
   3539 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3540 {
   3541 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3542 	uint32_t wlock_mac;
   3543 	int rv;
   3544 
   3545 	if (enaddr != NULL) {
   3546 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3547 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3548 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3549 		ral_hi |= RAL_AV;
   3550 	} else {
   3551 		ral_lo = 0;
   3552 		ral_hi = 0;
   3553 	}
   3554 
   3555 	switch (sc->sc_type) {
   3556 	case WM_T_82542_2_0:
   3557 	case WM_T_82542_2_1:
   3558 	case WM_T_82543:
   3559 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3560 		CSR_WRITE_FLUSH(sc);
   3561 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3562 		CSR_WRITE_FLUSH(sc);
   3563 		break;
   3564 	case WM_T_PCH2:
   3565 	case WM_T_PCH_LPT:
   3566 	case WM_T_PCH_SPT:
   3567 	case WM_T_PCH_CNP:
   3568 		if (idx == 0) {
   3569 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3570 			CSR_WRITE_FLUSH(sc);
   3571 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			return;
   3574 		}
   3575 		if (sc->sc_type != WM_T_PCH2) {
   3576 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3577 			    FWSM_WLOCK_MAC);
   3578 			addrl = WMREG_SHRAL(idx - 1);
   3579 			addrh = WMREG_SHRAH(idx - 1);
   3580 		} else {
   3581 			wlock_mac = 0;
   3582 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3583 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3584 		}
   3585 
   3586 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3587 			rv = wm_get_swflag_ich8lan(sc);
   3588 			if (rv != 0)
   3589 				return;
   3590 			CSR_WRITE(sc, addrl, ral_lo);
   3591 			CSR_WRITE_FLUSH(sc);
   3592 			CSR_WRITE(sc, addrh, ral_hi);
   3593 			CSR_WRITE_FLUSH(sc);
   3594 			wm_put_swflag_ich8lan(sc);
   3595 		}
   3596 
   3597 		break;
   3598 	default:
   3599 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3600 		CSR_WRITE_FLUSH(sc);
   3601 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3602 		CSR_WRITE_FLUSH(sc);
   3603 		break;
   3604 	}
   3605 }
   3606 
   3607 /*
   3608  * wm_mchash:
   3609  *
   3610  *	Compute the hash of the multicast address for the 4096-bit
   3611  *	multicast filter.
   3612  */
   3613 static uint32_t
   3614 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3615 {
   3616 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3617 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3618 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3619 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3620 	uint32_t hash;
   3621 
   3622 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3623 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3624 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3625 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3626 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3627 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3628 		return (hash & 0x3ff);
   3629 	}
   3630 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3631 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3632 
   3633 	return (hash & 0xfff);
   3634 }
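
/*
 * Worked example, assuming a non-ICH chip with sc_mchash_type 0
 * (lo_shift 4, hi_shift 4) and a multicast address ending in
 * enaddr[4] = 0x4d, enaddr[5] = 0x5e:
 *
 *	hash = (0x4d >> 4) | (0x5e << 4) = 0x004 | 0x5e0 = 0x5e4
 *
 * wm_set_filter() then splits this into MTA register 0x2f
 * (hash >> 5) and bit 4 (hash & 0x1f).
 */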
   3635 
    3636 /*
    3637  * wm_rar_count:
    3638  *	Return the number of usable receive address (RAL/RAH) slots.
    3639  */
   3640 static int
   3641 wm_rar_count(struct wm_softc *sc)
   3642 {
   3643 	int size;
   3644 
   3645 	switch (sc->sc_type) {
   3646 	case WM_T_ICH8:
    3647 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3648 		break;
   3649 	case WM_T_ICH9:
   3650 	case WM_T_ICH10:
   3651 	case WM_T_PCH:
   3652 		size = WM_RAL_TABSIZE_ICH8;
   3653 		break;
   3654 	case WM_T_PCH2:
   3655 		size = WM_RAL_TABSIZE_PCH2;
   3656 		break;
   3657 	case WM_T_PCH_LPT:
   3658 	case WM_T_PCH_SPT:
   3659 	case WM_T_PCH_CNP:
   3660 		size = WM_RAL_TABSIZE_PCH_LPT;
   3661 		break;
   3662 	case WM_T_82575:
   3663 	case WM_T_I210:
   3664 	case WM_T_I211:
   3665 		size = WM_RAL_TABSIZE_82575;
   3666 		break;
   3667 	case WM_T_82576:
   3668 	case WM_T_82580:
   3669 		size = WM_RAL_TABSIZE_82576;
   3670 		break;
   3671 	case WM_T_I350:
   3672 	case WM_T_I354:
   3673 		size = WM_RAL_TABSIZE_I350;
   3674 		break;
   3675 	default:
   3676 		size = WM_RAL_TABSIZE;
   3677 	}
   3678 
   3679 	return size;
   3680 }
   3681 
   3682 /*
   3683  * wm_set_filter:
   3684  *
   3685  *	Set up the receive filter.
   3686  */
   3687 static void
   3688 wm_set_filter(struct wm_softc *sc)
   3689 {
   3690 	struct ethercom *ec = &sc->sc_ethercom;
   3691 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3692 	struct ether_multi *enm;
   3693 	struct ether_multistep step;
   3694 	bus_addr_t mta_reg;
   3695 	uint32_t hash, reg, bit;
   3696 	int i, size, ralmax;
   3697 
   3698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3699 		device_xname(sc->sc_dev), __func__));
   3700 
   3701 	if (sc->sc_type >= WM_T_82544)
   3702 		mta_reg = WMREG_CORDOVA_MTA;
   3703 	else
   3704 		mta_reg = WMREG_MTA;
   3705 
   3706 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3707 
   3708 	if (ifp->if_flags & IFF_BROADCAST)
   3709 		sc->sc_rctl |= RCTL_BAM;
   3710 	if (ifp->if_flags & IFF_PROMISC) {
   3711 		sc->sc_rctl |= RCTL_UPE;
   3712 		ETHER_LOCK(ec);
   3713 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3714 		ETHER_UNLOCK(ec);
   3715 		goto allmulti;
   3716 	}
   3717 
   3718 	/*
   3719 	 * Set the station address in the first RAL slot, and
   3720 	 * clear the remaining slots.
   3721 	 */
   3722 	size = wm_rar_count(sc);
   3723 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3724 
   3725 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3726 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3727 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3728 		switch (i) {
   3729 		case 0:
   3730 			/* We can use all entries */
   3731 			ralmax = size;
   3732 			break;
   3733 		case 1:
   3734 			/* Only RAR[0] */
   3735 			ralmax = 1;
   3736 			break;
   3737 		default:
   3738 			/* Available SHRA + RAR[0] */
   3739 			ralmax = i + 1;
   3740 		}
   3741 	} else
   3742 		ralmax = size;
   3743 	for (i = 1; i < size; i++) {
   3744 		if (i < ralmax)
   3745 			wm_set_ral(sc, NULL, i);
   3746 	}
   3747 
   3748 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3749 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3750 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3751 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3752 		size = WM_ICH8_MC_TABSIZE;
   3753 	else
   3754 		size = WM_MC_TABSIZE;
   3755 	/* Clear out the multicast table. */
   3756 	for (i = 0; i < size; i++) {
   3757 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3758 		CSR_WRITE_FLUSH(sc);
   3759 	}
   3760 
   3761 	ETHER_LOCK(ec);
   3762 	ETHER_FIRST_MULTI(step, ec, enm);
   3763 	while (enm != NULL) {
   3764 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3765 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3766 			ETHER_UNLOCK(ec);
   3767 			/*
   3768 			 * We must listen to a range of multicast addresses.
   3769 			 * For now, just accept all multicasts, rather than
   3770 			 * trying to set only those filter bits needed to match
   3771 			 * the range.  (At this time, the only use of address
   3772 			 * ranges is for IP multicast routing, for which the
   3773 			 * range is big enough to require all bits set.)
   3774 			 */
   3775 			goto allmulti;
   3776 		}
   3777 
   3778 		hash = wm_mchash(sc, enm->enm_addrlo);
   3779 
   3780 		reg = (hash >> 5);
   3781 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3782 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3783 		    || (sc->sc_type == WM_T_PCH2)
   3784 		    || (sc->sc_type == WM_T_PCH_LPT)
   3785 		    || (sc->sc_type == WM_T_PCH_SPT)
   3786 		    || (sc->sc_type == WM_T_PCH_CNP))
   3787 			reg &= 0x1f;
   3788 		else
   3789 			reg &= 0x7f;
   3790 		bit = hash & 0x1f;
   3791 
   3792 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3793 		hash |= 1U << bit;
   3794 
   3795 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3796 			/*
   3797 			 * 82544 Errata 9: Certain register cannot be written
   3798 			 * with particular alignments in PCI-X bus operation
   3799 			 * (FCAH, MTA and VFTA).
   3800 			 */
   3801 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3802 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3805 			CSR_WRITE_FLUSH(sc);
   3806 		} else {
   3807 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3808 			CSR_WRITE_FLUSH(sc);
   3809 		}
   3810 
   3811 		ETHER_NEXT_MULTI(step, enm);
   3812 	}
   3813 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3814 	ETHER_UNLOCK(ec);
   3815 
   3816 	goto setit;
   3817 
   3818  allmulti:
   3819 	sc->sc_rctl |= RCTL_MPE;
   3820 
   3821  setit:
   3822 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3823 }
   3824 
   3825 /* Reset and init related */
   3826 
   3827 static void
   3828 wm_set_vlan(struct wm_softc *sc)
   3829 {
   3830 
   3831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3832 		device_xname(sc->sc_dev), __func__));
   3833 
   3834 	/* Deal with VLAN enables. */
   3835 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3836 		sc->sc_ctrl |= CTRL_VME;
   3837 	else
   3838 		sc->sc_ctrl &= ~CTRL_VME;
   3839 
   3840 	/* Write the control registers. */
   3841 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3842 }
   3843 
   3844 static void
   3845 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3846 {
   3847 	uint32_t gcr;
   3848 	pcireg_t ctrl2;
   3849 
   3850 	gcr = CSR_READ(sc, WMREG_GCR);
   3851 
   3852 	/* Only take action if timeout value is defaulted to 0 */
   3853 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3854 		goto out;
   3855 
   3856 	if ((gcr & GCR_CAP_VER2) == 0) {
   3857 		gcr |= GCR_CMPL_TMOUT_10MS;
   3858 		goto out;
   3859 	}
   3860 
   3861 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3862 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3863 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3864 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3865 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3866 
   3867 out:
   3868 	/* Disable completion timeout resend */
   3869 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3870 
   3871 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3872 }
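
/*
 * Decision summary for the function above: if the completion timeout
 * field in GCR is already nonzero, only the resend bit is cleared; if
 * it is zero and the device lacks the v2 capability, a 10ms timeout
 * is set directly in GCR; otherwise the 16ms value is programmed
 * through the PCIe Device Control 2 register in config space.
 */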
   3873 
   3874 void
   3875 wm_get_auto_rd_done(struct wm_softc *sc)
   3876 {
   3877 	int i;
   3878 
   3879 	/* wait for eeprom to reload */
   3880 	switch (sc->sc_type) {
   3881 	case WM_T_82571:
   3882 	case WM_T_82572:
   3883 	case WM_T_82573:
   3884 	case WM_T_82574:
   3885 	case WM_T_82583:
   3886 	case WM_T_82575:
   3887 	case WM_T_82576:
   3888 	case WM_T_82580:
   3889 	case WM_T_I350:
   3890 	case WM_T_I354:
   3891 	case WM_T_I210:
   3892 	case WM_T_I211:
   3893 	case WM_T_80003:
   3894 	case WM_T_ICH8:
   3895 	case WM_T_ICH9:
   3896 		for (i = 0; i < 10; i++) {
   3897 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3898 				break;
   3899 			delay(1000);
   3900 		}
   3901 		if (i == 10) {
   3902 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3903 			    "complete\n", device_xname(sc->sc_dev));
   3904 		}
   3905 		break;
   3906 	default:
   3907 		break;
   3908 	}
   3909 }
   3910 
   3911 void
   3912 wm_lan_init_done(struct wm_softc *sc)
   3913 {
   3914 	uint32_t reg = 0;
   3915 	int i;
   3916 
   3917 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3918 		device_xname(sc->sc_dev), __func__));
   3919 
   3920 	/* Wait for eeprom to reload */
   3921 	switch (sc->sc_type) {
   3922 	case WM_T_ICH10:
   3923 	case WM_T_PCH:
   3924 	case WM_T_PCH2:
   3925 	case WM_T_PCH_LPT:
   3926 	case WM_T_PCH_SPT:
   3927 	case WM_T_PCH_CNP:
   3928 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3929 			reg = CSR_READ(sc, WMREG_STATUS);
   3930 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3931 				break;
   3932 			delay(100);
   3933 		}
   3934 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3935 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3936 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3937 		}
   3938 		break;
   3939 	default:
   3940 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3941 		    __func__);
   3942 		break;
   3943 	}
   3944 
   3945 	reg &= ~STATUS_LAN_INIT_DONE;
   3946 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3947 }
   3948 
   3949 void
   3950 wm_get_cfg_done(struct wm_softc *sc)
   3951 {
   3952 	int mask;
   3953 	uint32_t reg;
   3954 	int i;
   3955 
   3956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3957 		device_xname(sc->sc_dev), __func__));
   3958 
   3959 	/* Wait for eeprom to reload */
   3960 	switch (sc->sc_type) {
   3961 	case WM_T_82542_2_0:
   3962 	case WM_T_82542_2_1:
   3963 		/* null */
   3964 		break;
   3965 	case WM_T_82543:
   3966 	case WM_T_82544:
   3967 	case WM_T_82540:
   3968 	case WM_T_82545:
   3969 	case WM_T_82545_3:
   3970 	case WM_T_82546:
   3971 	case WM_T_82546_3:
   3972 	case WM_T_82541:
   3973 	case WM_T_82541_2:
   3974 	case WM_T_82547:
   3975 	case WM_T_82547_2:
   3976 	case WM_T_82573:
   3977 	case WM_T_82574:
   3978 	case WM_T_82583:
   3979 		/* generic */
   3980 		delay(10*1000);
   3981 		break;
   3982 	case WM_T_80003:
   3983 	case WM_T_82571:
   3984 	case WM_T_82572:
   3985 	case WM_T_82575:
   3986 	case WM_T_82576:
   3987 	case WM_T_82580:
   3988 	case WM_T_I350:
   3989 	case WM_T_I354:
   3990 	case WM_T_I210:
   3991 	case WM_T_I211:
   3992 		if (sc->sc_type == WM_T_82571) {
   3993 			/* Only 82571 shares port 0 */
   3994 			mask = EEMNGCTL_CFGDONE_0;
   3995 		} else
   3996 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3997 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3998 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3999 				break;
   4000 			delay(1000);
   4001 		}
   4002 		if (i >= WM_PHY_CFG_TIMEOUT)
   4003 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4004 				device_xname(sc->sc_dev), __func__));
   4005 		break;
   4006 	case WM_T_ICH8:
   4007 	case WM_T_ICH9:
   4008 	case WM_T_ICH10:
   4009 	case WM_T_PCH:
   4010 	case WM_T_PCH2:
   4011 	case WM_T_PCH_LPT:
   4012 	case WM_T_PCH_SPT:
   4013 	case WM_T_PCH_CNP:
   4014 		delay(10*1000);
   4015 		if (sc->sc_type >= WM_T_ICH10)
   4016 			wm_lan_init_done(sc);
   4017 		else
   4018 			wm_get_auto_rd_done(sc);
   4019 
   4020 		/* Clear PHY Reset Asserted bit */
   4021 		reg = CSR_READ(sc, WMREG_STATUS);
   4022 		if ((reg & STATUS_PHYRA) != 0)
   4023 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4024 		break;
   4025 	default:
   4026 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4027 		    __func__);
   4028 		break;
   4029 	}
   4030 }
   4031 
   4032 int
   4033 wm_phy_post_reset(struct wm_softc *sc)
   4034 {
   4035 	device_t dev = sc->sc_dev;
   4036 	uint16_t reg;
   4037 	int rv = 0;
   4038 
   4039 	/* This function is only for ICH8 and newer. */
   4040 	if (sc->sc_type < WM_T_ICH8)
   4041 		return 0;
   4042 
   4043 	if (wm_phy_resetisblocked(sc)) {
   4044 		/* XXX */
   4045 		device_printf(dev, "PHY is blocked\n");
   4046 		return -1;
   4047 	}
   4048 
   4049 	/* Allow time for h/w to get to quiescent state after reset */
   4050 	delay(10*1000);
   4051 
   4052 	/* Perform any necessary post-reset workarounds */
   4053 	if (sc->sc_type == WM_T_PCH)
   4054 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4055 	else if (sc->sc_type == WM_T_PCH2)
   4056 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4057 	if (rv != 0)
   4058 		return rv;
   4059 
   4060 	/* Clear the host wakeup bit after lcd reset */
   4061 	if (sc->sc_type >= WM_T_PCH) {
   4062 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4063 		reg &= ~BM_WUC_HOST_WU_BIT;
   4064 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4065 	}
   4066 
   4067 	/* Configure the LCD with the extended configuration region in NVM */
   4068 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4069 		return rv;
   4070 
   4071 	/* Configure the LCD with the OEM bits in NVM */
   4072 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4073 
   4074 	if (sc->sc_type == WM_T_PCH2) {
   4075 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4076 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4077 			delay(10 * 1000);
   4078 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4079 		}
   4080 		/* Set EEE LPI Update Timer to 200usec */
   4081 		rv = sc->phy.acquire(sc);
   4082 		if (rv)
   4083 			return rv;
   4084 		rv = wm_write_emi_reg_locked(dev,
   4085 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4086 		sc->phy.release(sc);
   4087 	}
   4088 
   4089 	return rv;
   4090 }
   4091 
   4092 /* Only for PCH and newer */
   4093 static int
   4094 wm_write_smbus_addr(struct wm_softc *sc)
   4095 {
   4096 	uint32_t strap, freq;
   4097 	uint16_t phy_data;
   4098 	int rv;
   4099 
   4100 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4101 		device_xname(sc->sc_dev), __func__));
   4102 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4103 
   4104 	strap = CSR_READ(sc, WMREG_STRAP);
   4105 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4106 
   4107 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4108 	if (rv != 0)
   4109 		return -1;
   4110 
   4111 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4112 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4113 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4114 
   4115 	if (sc->sc_phytype == WMPHY_I217) {
   4116 		/* Restore SMBus frequency */
    4117 		if (freq--) {
   4118 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4119 			    | HV_SMB_ADDR_FREQ_HIGH);
   4120 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4121 			    HV_SMB_ADDR_FREQ_LOW);
   4122 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4123 			    HV_SMB_ADDR_FREQ_HIGH);
   4124 		} else
   4125 			DPRINTF(WM_DEBUG_INIT,
   4126 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4127 				device_xname(sc->sc_dev), __func__));
   4128 	}
   4129 
   4130 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4131 	    phy_data);
   4132 }
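
/*
 * Worked example of the frequency restore above: with a STRAP_FREQ
 * field of 2, "if (freq--)" tests the nonzero value 2 and leaves
 * freq == 1, so FREQ_LOW is set from bit 0 (1) and FREQ_HIGH is
 * cleared from bit 1 (0).  A strap field of 0 means the frequency is
 * unsupported and only the debug message is emitted.
 */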
   4133 
   4134 static int
   4135 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4136 {
   4137 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4138 	uint16_t phy_page = 0;
   4139 	int rv = 0;
   4140 
   4141 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4142 		device_xname(sc->sc_dev), __func__));
   4143 
   4144 	switch (sc->sc_type) {
   4145 	case WM_T_ICH8:
   4146 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4147 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4148 			return 0;
   4149 
   4150 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4151 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4152 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4153 			break;
   4154 		}
   4155 		/* FALLTHROUGH */
   4156 	case WM_T_PCH:
   4157 	case WM_T_PCH2:
   4158 	case WM_T_PCH_LPT:
   4159 	case WM_T_PCH_SPT:
   4160 	case WM_T_PCH_CNP:
   4161 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4162 		break;
   4163 	default:
   4164 		return 0;
   4165 	}
   4166 
   4167 	if ((rv = sc->phy.acquire(sc)) != 0)
   4168 		return rv;
   4169 
   4170 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4171 	if ((reg & sw_cfg_mask) == 0)
   4172 		goto release;
   4173 
   4174 	/*
   4175 	 * Make sure HW does not configure LCD from PHY extended configuration
   4176 	 * before SW configuration
   4177 	 */
   4178 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4179 	if ((sc->sc_type < WM_T_PCH2)
   4180 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4181 		goto release;
   4182 
   4183 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4184 		device_xname(sc->sc_dev), __func__));
   4185 	/* word_addr is in DWORD */
   4186 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4187 
   4188 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4189 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4190 	if (cnf_size == 0)
   4191 		goto release;
   4192 
   4193 	if (((sc->sc_type == WM_T_PCH)
   4194 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4195 	    || (sc->sc_type > WM_T_PCH)) {
   4196 		/*
   4197 		 * HW configures the SMBus address and LEDs when the OEM and
   4198 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4199 		 * are cleared, SW will configure them instead.
   4200 		 */
   4201 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4202 			device_xname(sc->sc_dev), __func__));
   4203 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4204 			goto release;
   4205 
   4206 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4207 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4208 		    (uint16_t)reg);
   4209 		if (rv != 0)
   4210 			goto release;
   4211 	}
   4212 
   4213 	/* Configure LCD from extended configuration region. */
   4214 	for (i = 0; i < cnf_size; i++) {
   4215 		uint16_t reg_data, reg_addr;
   4216 
   4217 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4218 			goto release;
   4219 
    4220 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4221 			goto release;
   4222 
   4223 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4224 			phy_page = reg_data;
   4225 
   4226 		reg_addr &= IGPHY_MAXREGADDR;
   4227 		reg_addr |= phy_page;
   4228 
   4229 		KASSERT(sc->phy.writereg_locked != NULL);
   4230 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4231 		    reg_data);
   4232 	}
   4233 
   4234 release:
   4235 	sc->phy.release(sc);
   4236 	return rv;
   4237 }
   4238 
   4239 /*
   4240  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4241  *  @sc:       pointer to the HW structure
   4242  *  @d0_state: boolean if entering d0 or d3 device state
   4243  *
   4244  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4245  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4246  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4247  */
   4248 int
   4249 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4250 {
   4251 	uint32_t mac_reg;
   4252 	uint16_t oem_reg;
   4253 	int rv;
   4254 
   4255 	if (sc->sc_type < WM_T_PCH)
   4256 		return 0;
   4257 
   4258 	rv = sc->phy.acquire(sc);
   4259 	if (rv != 0)
   4260 		return rv;
   4261 
   4262 	if (sc->sc_type == WM_T_PCH) {
   4263 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4264 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4265 			goto release;
   4266 	}
   4267 
   4268 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4269 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4270 		goto release;
   4271 
   4272 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4273 
   4274 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4275 	if (rv != 0)
   4276 		goto release;
   4277 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4278 
   4279 	if (d0_state) {
   4280 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4281 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4282 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4283 			oem_reg |= HV_OEM_BITS_LPLU;
   4284 	} else {
   4285 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4286 		    != 0)
   4287 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4288 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4289 		    != 0)
   4290 			oem_reg |= HV_OEM_BITS_LPLU;
   4291 	}
   4292 
   4293 	/* Set Restart auto-neg to activate the bits */
   4294 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4295 	    && (wm_phy_resetisblocked(sc) == false))
   4296 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4297 
   4298 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4299 
   4300 release:
   4301 	sc->phy.release(sc);
   4302 
   4303 	return rv;
   4304 }
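
/*
 * The mapping applied above, in brief: entering D0 copies
 * PHY_CTRL_GBE_DIS to HV_OEM_BITS_A1KDIS and PHY_CTRL_D0A_LPLU to
 * HV_OEM_BITS_LPLU; entering D3 additionally honors the NOND0A
 * variants of both bits.  ANEGNOW is then set to restart
 * autonegotiation so the new bits take effect immediately.
 */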
   4305 
   4306 /* Init hardware bits */
   4307 void
   4308 wm_initialize_hardware_bits(struct wm_softc *sc)
   4309 {
   4310 	uint32_t tarc0, tarc1, reg;
   4311 
   4312 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4313 		device_xname(sc->sc_dev), __func__));
   4314 
   4315 	/* For 82571 variant, 80003 and ICHs */
   4316 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4317 	    || (sc->sc_type >= WM_T_80003)) {
   4318 
   4319 		/* Transmit Descriptor Control 0 */
   4320 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4321 		reg |= TXDCTL_COUNT_DESC;
   4322 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4323 
   4324 		/* Transmit Descriptor Control 1 */
   4325 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4326 		reg |= TXDCTL_COUNT_DESC;
   4327 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4328 
   4329 		/* TARC0 */
   4330 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4331 		switch (sc->sc_type) {
   4332 		case WM_T_82571:
   4333 		case WM_T_82572:
   4334 		case WM_T_82573:
   4335 		case WM_T_82574:
   4336 		case WM_T_82583:
   4337 		case WM_T_80003:
   4338 			/* Clear bits 30..27 */
   4339 			tarc0 &= ~__BITS(30, 27);
   4340 			break;
   4341 		default:
   4342 			break;
   4343 		}
   4344 
   4345 		switch (sc->sc_type) {
   4346 		case WM_T_82571:
   4347 		case WM_T_82572:
   4348 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4349 
   4350 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4351 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4352 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4353 			/* 8257[12] Errata No.7 */
   4354 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4355 
   4356 			/* TARC1 bit 28 */
   4357 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4358 				tarc1 &= ~__BIT(28);
   4359 			else
   4360 				tarc1 |= __BIT(28);
   4361 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4362 
   4363 			/*
   4364 			 * 8257[12] Errata No.13
    4365 			 * Disable Dynamic Clock Gating.
   4366 			 */
   4367 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4368 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4369 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4370 			break;
   4371 		case WM_T_82573:
   4372 		case WM_T_82574:
   4373 		case WM_T_82583:
   4374 			if ((sc->sc_type == WM_T_82574)
   4375 			    || (sc->sc_type == WM_T_82583))
   4376 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4377 
   4378 			/* Extended Device Control */
   4379 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4380 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4381 			reg |= __BIT(22);	/* Set bit 22 */
   4382 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4383 
   4384 			/* Device Control */
   4385 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4386 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4387 
   4388 			/* PCIe Control Register */
   4389 			/*
   4390 			 * 82573 Errata (unknown).
   4391 			 *
   4392 			 * 82574 Errata 25 and 82583 Errata 12
   4393 			 * "Dropped Rx Packets":
    4394 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4395 			 */
   4396 			reg = CSR_READ(sc, WMREG_GCR);
   4397 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4398 			CSR_WRITE(sc, WMREG_GCR, reg);
   4399 
   4400 			if ((sc->sc_type == WM_T_82574)
   4401 			    || (sc->sc_type == WM_T_82583)) {
   4402 				/*
   4403 				 * Document says this bit must be set for
   4404 				 * proper operation.
   4405 				 */
   4406 				reg = CSR_READ(sc, WMREG_GCR);
   4407 				reg |= __BIT(22);
   4408 				CSR_WRITE(sc, WMREG_GCR, reg);
   4409 
   4410 				/*
    4411 				 * Apply the workaround for the hardware
    4412 				 * erratum documented in the errata docs:
    4413 				 * some error-prone or unreliable PCIe
    4414 				 * completions occur, particularly with
    4415 				 * ASPM enabled. Without the fix, the
    4416 				 * issue can cause Tx timeouts.
   4417 				 */
   4418 				reg = CSR_READ(sc, WMREG_GCR2);
   4419 				reg |= __BIT(0);
   4420 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4421 			}
   4422 			break;
   4423 		case WM_T_80003:
   4424 			/* TARC0 */
   4425 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4426 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4427 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4428 
   4429 			/* TARC1 bit 28 */
   4430 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4431 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4432 				tarc1 &= ~__BIT(28);
   4433 			else
   4434 				tarc1 |= __BIT(28);
   4435 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4436 			break;
   4437 		case WM_T_ICH8:
   4438 		case WM_T_ICH9:
   4439 		case WM_T_ICH10:
   4440 		case WM_T_PCH:
   4441 		case WM_T_PCH2:
   4442 		case WM_T_PCH_LPT:
   4443 		case WM_T_PCH_SPT:
   4444 		case WM_T_PCH_CNP:
   4445 			/* TARC0 */
   4446 			if (sc->sc_type == WM_T_ICH8) {
   4447 				/* Set TARC0 bits 29 and 28 */
   4448 				tarc0 |= __BITS(29, 28);
   4449 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4450 				tarc0 |= __BIT(29);
   4451 				/*
   4452 				 *  Drop bit 28. From Linux.
   4453 				 * See I218/I219 spec update
   4454 				 * "5. Buffer Overrun While the I219 is
   4455 				 * Processing DMA Transactions"
   4456 				 */
   4457 				tarc0 &= ~__BIT(28);
   4458 			}
   4459 			/* Set TARC0 bits 23,24,26,27 */
   4460 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4461 
   4462 			/* CTRL_EXT */
   4463 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4464 			reg |= __BIT(22);	/* Set bit 22 */
   4465 			/*
   4466 			 * Enable PHY low-power state when MAC is at D3
   4467 			 * w/o WoL
   4468 			 */
   4469 			if (sc->sc_type >= WM_T_PCH)
   4470 				reg |= CTRL_EXT_PHYPDEN;
   4471 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4472 
   4473 			/* TARC1 */
   4474 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4475 			/* bit 28 */
   4476 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4477 				tarc1 &= ~__BIT(28);
   4478 			else
   4479 				tarc1 |= __BIT(28);
   4480 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4481 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4482 
   4483 			/* Device Status */
   4484 			if (sc->sc_type == WM_T_ICH8) {
   4485 				reg = CSR_READ(sc, WMREG_STATUS);
   4486 				reg &= ~__BIT(31);
   4487 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4488 
   4489 			}
   4490 
   4491 			/* IOSFPC */
   4492 			if (sc->sc_type == WM_T_PCH_SPT) {
   4493 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4494 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4495 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4496 			}
   4497 			/*
    4498 			 * To work around a descriptor data corruption issue
    4499 			 * during NFS v2 UDP traffic, just disable the NFS
    4500 			 * filtering capability.
   4501 			 */
   4502 			reg = CSR_READ(sc, WMREG_RFCTL);
   4503 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4504 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4505 			break;
   4506 		default:
   4507 			break;
   4508 		}
   4509 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4510 
   4511 		switch (sc->sc_type) {
   4512 		/*
   4513 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4514 		 * Avoid RSS Hash Value bug.
   4515 		 */
   4516 		case WM_T_82571:
   4517 		case WM_T_82572:
   4518 		case WM_T_82573:
   4519 		case WM_T_80003:
   4520 		case WM_T_ICH8:
   4521 			reg = CSR_READ(sc, WMREG_RFCTL);
    4522 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4523 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4524 			break;
   4525 		case WM_T_82574:
    4526 			/* Use extended Rx descriptors. */
   4527 			reg = CSR_READ(sc, WMREG_RFCTL);
   4528 			reg |= WMREG_RFCTL_EXSTEN;
   4529 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4530 			break;
   4531 		default:
   4532 			break;
   4533 		}
   4534 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4535 		/*
   4536 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4537 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4538 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4539 		 * Correctly by the Device"
   4540 		 *
   4541 		 * I354(C2000) Errata AVR53:
   4542 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4543 		 * Hang"
   4544 		 */
   4545 		reg = CSR_READ(sc, WMREG_RFCTL);
   4546 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4547 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4548 	}
   4549 }
   4550 
   4551 static uint32_t
   4552 wm_rxpbs_adjust_82580(uint32_t val)
   4553 {
   4554 	uint32_t rv = 0;
   4555 
   4556 	if (val < __arraycount(wm_82580_rxpbs_table))
   4557 		rv = wm_82580_rxpbs_table[val];
   4558 
   4559 	return rv;
   4560 }
   4561 
   4562 /*
   4563  * wm_reset_phy:
   4564  *
   4565  *	generic PHY reset function.
   4566  *	Same as e1000_phy_hw_reset_generic()
   4567  */
   4568 static int
   4569 wm_reset_phy(struct wm_softc *sc)
   4570 {
   4571 	uint32_t reg;
   4572 
   4573 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4574 		device_xname(sc->sc_dev), __func__));
   4575 	if (wm_phy_resetisblocked(sc))
   4576 		return -1;
   4577 
   4578 	sc->phy.acquire(sc);
   4579 
   4580 	reg = CSR_READ(sc, WMREG_CTRL);
   4581 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4582 	CSR_WRITE_FLUSH(sc);
   4583 
   4584 	delay(sc->phy.reset_delay_us);
   4585 
   4586 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4587 	CSR_WRITE_FLUSH(sc);
   4588 
   4589 	delay(150);
   4590 
   4591 	sc->phy.release(sc);
   4592 
   4593 	wm_get_cfg_done(sc);
   4594 	wm_phy_post_reset(sc);
   4595 
   4596 	return 0;
   4597 }
   4598 
   4599 /*
    4600  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4601  * so it is enough to check sc->sc_queue[0] only.
   4602  */
   4603 static void
   4604 wm_flush_desc_rings(struct wm_softc *sc)
   4605 {
   4606 	pcireg_t preg;
   4607 	uint32_t reg;
   4608 	struct wm_txqueue *txq;
   4609 	wiseman_txdesc_t *txd;
   4610 	int nexttx;
   4611 	uint32_t rctl;
   4612 
   4613 	/* First, disable MULR fix in FEXTNVM11 */
   4614 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4615 	reg |= FEXTNVM11_DIS_MULRFIX;
   4616 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4617 
   4618 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4619 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4620 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4621 		return;
   4622 
   4623 	/* TX */
   4624 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4625 	    device_xname(sc->sc_dev), preg, reg);
   4626 	reg = CSR_READ(sc, WMREG_TCTL);
   4627 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4628 
   4629 	txq = &sc->sc_queue[0].wmq_txq;
   4630 	nexttx = txq->txq_next;
   4631 	txd = &txq->txq_descs[nexttx];
   4632 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4633 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4634 	txd->wtx_fields.wtxu_status = 0;
   4635 	txd->wtx_fields.wtxu_options = 0;
   4636 	txd->wtx_fields.wtxu_vlan = 0;
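         	/*
         	 * The low-order bits of wtx_cmdlen carry the buffer length,
         	 * so the above queues one dummy 512-byte transmit descriptor
         	 * to make the hardware advance and drain the Tx ring.
         	 */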
   4637 
   4638 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4639 	    BUS_SPACE_BARRIER_WRITE);
   4640 
   4641 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4642 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4643 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4644 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4645 	delay(250);
   4646 
   4647 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4648 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4649 		return;
   4650 
   4651 	/* RX */
   4652 	printf("%s: Need RX flush (reg = %08x)\n",
   4653 	    device_xname(sc->sc_dev), preg);
   4654 	rctl = CSR_READ(sc, WMREG_RCTL);
   4655 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4656 	CSR_WRITE_FLUSH(sc);
   4657 	delay(150);
   4658 
   4659 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4660 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4661 	reg &= 0xffffc000;
   4662 	/*
   4663 	 * Update thresholds: prefetch threshold to 31, host threshold
   4664 	 * to 1 and make sure the granularity is "descriptors" and not
   4665 	 * "cache lines"
   4666 	 */
   4667 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
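         	/*
         	 * That is, PTHRESH = 0x1f in the low bits, HTHRESH = 1 in
         	 * bits 13:8, and descriptor (not cache-line) granularity via
         	 * RXDCTL_GRAN.
         	 */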
   4668 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4669 
   4670 	/* Momentarily enable the RX ring for the changes to take effect */
   4671 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4672 	CSR_WRITE_FLUSH(sc);
   4673 	delay(150);
   4674 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4675 }
   4676 
   4677 /*
   4678  * wm_reset:
   4679  *
   4680  *	Reset the i82542 chip.
   4681  */
   4682 static void
   4683 wm_reset(struct wm_softc *sc)
   4684 {
   4685 	int phy_reset = 0;
   4686 	int i, error = 0;
   4687 	uint32_t reg;
   4688 	uint16_t kmreg;
   4689 	int rv;
   4690 
   4691 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4692 		device_xname(sc->sc_dev), __func__));
   4693 	KASSERT(sc->sc_type != 0);
   4694 
   4695 	/*
   4696 	 * Allocate on-chip memory according to the MTU size.
   4697 	 * The Packet Buffer Allocation register must be written
   4698 	 * before the chip is reset.
   4699 	 */
   4700 	switch (sc->sc_type) {
   4701 	case WM_T_82547:
   4702 	case WM_T_82547_2:
   4703 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4704 		    PBA_22K : PBA_30K;
   4705 		for (i = 0; i < sc->sc_nqueues; i++) {
   4706 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4707 			txq->txq_fifo_head = 0;
   4708 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4709 			txq->txq_fifo_size =
   4710 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
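         			/*
         			 * E.g. with sc_pba == PBA_30K, the Tx FIFO
         			 * gets the remaining 10KB of the 40KB packet
         			 * buffer.
         			 */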
   4711 			txq->txq_fifo_stall = 0;
   4712 		}
   4713 		break;
   4714 	case WM_T_82571:
   4715 	case WM_T_82572:
   4716 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4717 	case WM_T_80003:
   4718 		sc->sc_pba = PBA_32K;
   4719 		break;
   4720 	case WM_T_82573:
   4721 		sc->sc_pba = PBA_12K;
   4722 		break;
   4723 	case WM_T_82574:
   4724 	case WM_T_82583:
   4725 		sc->sc_pba = PBA_20K;
   4726 		break;
   4727 	case WM_T_82576:
   4728 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4729 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4730 		break;
   4731 	case WM_T_82580:
   4732 	case WM_T_I350:
   4733 	case WM_T_I354:
   4734 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4735 		break;
   4736 	case WM_T_I210:
   4737 	case WM_T_I211:
   4738 		sc->sc_pba = PBA_34K;
   4739 		break;
   4740 	case WM_T_ICH8:
   4741 		/* Workaround for a bit corruption issue in FIFO memory */
   4742 		sc->sc_pba = PBA_8K;
   4743 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4744 		break;
   4745 	case WM_T_ICH9:
   4746 	case WM_T_ICH10:
   4747 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4748 		    PBA_14K : PBA_10K;
   4749 		break;
   4750 	case WM_T_PCH:
   4751 	case WM_T_PCH2:	/* XXX 14K? */
   4752 	case WM_T_PCH_LPT:
   4753 	case WM_T_PCH_SPT:
   4754 	case WM_T_PCH_CNP:
   4755 		sc->sc_pba = PBA_26K;
   4756 		break;
   4757 	default:
   4758 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4759 		    PBA_40K : PBA_48K;
   4760 		break;
   4761 	}
   4762 	/*
    4763 	 * Only old or non-multiqueue devices have the PBA register.
   4764 	 * XXX Need special handling for 82575.
   4765 	 */
   4766 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4767 	    || (sc->sc_type == WM_T_82575))
   4768 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4769 
   4770 	/* Prevent the PCI-E bus from sticking */
   4771 	if (sc->sc_flags & WM_F_PCIE) {
   4772 		int timeout = 800;
   4773 
   4774 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4775 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4776 
   4777 		while (timeout--) {
   4778 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4779 			    == 0)
   4780 				break;
   4781 			delay(100);
   4782 		}
   4783 		if (timeout == 0)
   4784 			device_printf(sc->sc_dev,
   4785 			    "failed to disable busmastering\n");
   4786 	}
   4787 
    4788 	/* Set the completion timeout for the interface */
   4789 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4790 	    || (sc->sc_type == WM_T_82580)
   4791 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4792 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4793 		wm_set_pcie_completion_timeout(sc);
   4794 
   4795 	/* Clear interrupt */
   4796 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4797 	if (wm_is_using_msix(sc)) {
   4798 		if (sc->sc_type != WM_T_82574) {
   4799 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4800 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4801 		} else
   4802 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4803 	}
   4804 
   4805 	/* Stop the transmit and receive processes. */
   4806 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4807 	sc->sc_rctl &= ~RCTL_EN;
   4808 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4809 	CSR_WRITE_FLUSH(sc);
   4810 
   4811 	/* XXX set_tbi_sbp_82543() */
   4812 
   4813 	delay(10*1000);
   4814 
   4815 	/* Must acquire the MDIO ownership before MAC reset */
   4816 	switch (sc->sc_type) {
   4817 	case WM_T_82573:
   4818 	case WM_T_82574:
   4819 	case WM_T_82583:
   4820 		error = wm_get_hw_semaphore_82573(sc);
   4821 		break;
   4822 	default:
   4823 		break;
   4824 	}
   4825 
   4826 	/*
   4827 	 * 82541 Errata 29? & 82547 Errata 28?
   4828 	 * See also the description about PHY_RST bit in CTRL register
   4829 	 * in 8254x_GBe_SDM.pdf.
   4830 	 */
   4831 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4832 		CSR_WRITE(sc, WMREG_CTRL,
   4833 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4834 		CSR_WRITE_FLUSH(sc);
   4835 		delay(5000);
   4836 	}
   4837 
   4838 	switch (sc->sc_type) {
   4839 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4840 	case WM_T_82541:
   4841 	case WM_T_82541_2:
   4842 	case WM_T_82547:
   4843 	case WM_T_82547_2:
   4844 		/*
   4845 		 * On some chipsets, a reset through a memory-mapped write
   4846 		 * cycle can cause the chip to reset before completing the
    4847 		 * write cycle. This causes a major headache that can be avoided
   4848 		 * by issuing the reset via indirect register writes through
   4849 		 * I/O space.
   4850 		 *
   4851 		 * So, if we successfully mapped the I/O BAR at attach time,
   4852 		 * use that. Otherwise, try our luck with a memory-mapped
   4853 		 * reset.
   4854 		 */
   4855 		if (sc->sc_flags & WM_F_IOH_VALID)
   4856 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4857 		else
   4858 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4859 		break;
   4860 	case WM_T_82545_3:
   4861 	case WM_T_82546_3:
   4862 		/* Use the shadow control register on these chips. */
   4863 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4864 		break;
   4865 	case WM_T_80003:
   4866 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4867 		sc->phy.acquire(sc);
   4868 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4869 		sc->phy.release(sc);
   4870 		break;
   4871 	case WM_T_ICH8:
   4872 	case WM_T_ICH9:
   4873 	case WM_T_ICH10:
   4874 	case WM_T_PCH:
   4875 	case WM_T_PCH2:
   4876 	case WM_T_PCH_LPT:
   4877 	case WM_T_PCH_SPT:
   4878 	case WM_T_PCH_CNP:
   4879 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4880 		if (wm_phy_resetisblocked(sc) == false) {
   4881 			/*
   4882 			 * Gate automatic PHY configuration by hardware on
   4883 			 * non-managed 82579
   4884 			 */
   4885 			if ((sc->sc_type == WM_T_PCH2)
   4886 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4887 				== 0))
   4888 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4889 
   4890 			reg |= CTRL_PHY_RESET;
   4891 			phy_reset = 1;
   4892 		} else
   4893 			printf("XXX reset is blocked!!!\n");
   4894 		sc->phy.acquire(sc);
   4895 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4896 		/* Don't insert a completion barrier during reset */
   4897 		delay(20*1000);
   4898 		mutex_exit(sc->sc_ich_phymtx);
   4899 		break;
   4900 	case WM_T_82580:
   4901 	case WM_T_I350:
   4902 	case WM_T_I354:
   4903 	case WM_T_I210:
   4904 	case WM_T_I211:
   4905 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4906 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4907 			CSR_WRITE_FLUSH(sc);
   4908 		delay(5000);
   4909 		break;
   4910 	case WM_T_82542_2_0:
   4911 	case WM_T_82542_2_1:
   4912 	case WM_T_82543:
   4913 	case WM_T_82540:
   4914 	case WM_T_82545:
   4915 	case WM_T_82546:
   4916 	case WM_T_82571:
   4917 	case WM_T_82572:
   4918 	case WM_T_82573:
   4919 	case WM_T_82574:
   4920 	case WM_T_82575:
   4921 	case WM_T_82576:
   4922 	case WM_T_82583:
   4923 	default:
   4924 		/* Everything else can safely use the documented method. */
   4925 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4926 		break;
   4927 	}
   4928 
   4929 	/* Must release the MDIO ownership after MAC reset */
   4930 	switch (sc->sc_type) {
   4931 	case WM_T_82573:
   4932 	case WM_T_82574:
   4933 	case WM_T_82583:
   4934 		if (error == 0)
   4935 			wm_put_hw_semaphore_82573(sc);
   4936 		break;
   4937 	default:
   4938 		break;
   4939 	}
   4940 
   4941 	/* Set Phy Config Counter to 50msec */
   4942 	if (sc->sc_type == WM_T_PCH2) {
   4943 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4944 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4945 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4946 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4947 	}
   4948 
   4949 	if (phy_reset != 0)
   4950 		wm_get_cfg_done(sc);
   4951 
   4952 	/* Reload EEPROM */
   4953 	switch (sc->sc_type) {
   4954 	case WM_T_82542_2_0:
   4955 	case WM_T_82542_2_1:
   4956 	case WM_T_82543:
   4957 	case WM_T_82544:
   4958 		delay(10);
   4959 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4960 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4961 		CSR_WRITE_FLUSH(sc);
   4962 		delay(2000);
   4963 		break;
   4964 	case WM_T_82540:
   4965 	case WM_T_82545:
   4966 	case WM_T_82545_3:
   4967 	case WM_T_82546:
   4968 	case WM_T_82546_3:
   4969 		delay(5*1000);
   4970 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4971 		break;
   4972 	case WM_T_82541:
   4973 	case WM_T_82541_2:
   4974 	case WM_T_82547:
   4975 	case WM_T_82547_2:
   4976 		delay(20000);
   4977 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4978 		break;
   4979 	case WM_T_82571:
   4980 	case WM_T_82572:
   4981 	case WM_T_82573:
   4982 	case WM_T_82574:
   4983 	case WM_T_82583:
   4984 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4985 			delay(10);
   4986 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4987 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4988 			CSR_WRITE_FLUSH(sc);
   4989 		}
   4990 		/* check EECD_EE_AUTORD */
   4991 		wm_get_auto_rd_done(sc);
   4992 		/*
   4993 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4994 		 * is set.
   4995 		 */
   4996 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4997 		    || (sc->sc_type == WM_T_82583))
   4998 			delay(25*1000);
   4999 		break;
   5000 	case WM_T_82575:
   5001 	case WM_T_82576:
   5002 	case WM_T_82580:
   5003 	case WM_T_I350:
   5004 	case WM_T_I354:
   5005 	case WM_T_I210:
   5006 	case WM_T_I211:
   5007 	case WM_T_80003:
   5008 		/* check EECD_EE_AUTORD */
   5009 		wm_get_auto_rd_done(sc);
   5010 		break;
   5011 	case WM_T_ICH8:
   5012 	case WM_T_ICH9:
   5013 	case WM_T_ICH10:
   5014 	case WM_T_PCH:
   5015 	case WM_T_PCH2:
   5016 	case WM_T_PCH_LPT:
   5017 	case WM_T_PCH_SPT:
   5018 	case WM_T_PCH_CNP:
   5019 		break;
   5020 	default:
   5021 		panic("%s: unknown type\n", __func__);
   5022 	}
   5023 
   5024 	/* Check whether EEPROM is present or not */
   5025 	switch (sc->sc_type) {
   5026 	case WM_T_82575:
   5027 	case WM_T_82576:
   5028 	case WM_T_82580:
   5029 	case WM_T_I350:
   5030 	case WM_T_I354:
   5031 	case WM_T_ICH8:
   5032 	case WM_T_ICH9:
   5033 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5034 			/* Not found */
   5035 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5036 			if (sc->sc_type == WM_T_82575)
   5037 				wm_reset_init_script_82575(sc);
   5038 		}
   5039 		break;
   5040 	default:
   5041 		break;
   5042 	}
   5043 
   5044 	if (phy_reset != 0)
   5045 		wm_phy_post_reset(sc);
   5046 
   5047 	if ((sc->sc_type == WM_T_82580)
   5048 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5049 		/* Clear global device reset status bit */
   5050 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5051 	}
   5052 
   5053 	/* Clear any pending interrupt events. */
   5054 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5055 	reg = CSR_READ(sc, WMREG_ICR);
   5056 	if (wm_is_using_msix(sc)) {
   5057 		if (sc->sc_type != WM_T_82574) {
   5058 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5059 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5060 		} else
   5061 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5062 	}
   5063 
   5064 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5065 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5066 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5067 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5068 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5069 		reg |= KABGTXD_BGSQLBIAS;
   5070 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5071 	}
   5072 
   5073 	/* Reload sc_ctrl */
   5074 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5075 
   5076 	wm_set_eee(sc);
   5077 
   5078 	/*
   5079 	 * For PCH, this write will make sure that any noise will be detected
   5080 	 * as a CRC error and be dropped rather than show up as a bad packet
   5081 	 * to the DMA engine
   5082 	 */
   5083 	if (sc->sc_type == WM_T_PCH)
   5084 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5085 
   5086 	if (sc->sc_type >= WM_T_82544)
   5087 		CSR_WRITE(sc, WMREG_WUC, 0);
   5088 
   5089 	if (sc->sc_type < WM_T_82575)
   5090 		wm_disable_aspm(sc); /* Workaround for some chips */
   5091 
   5092 	wm_reset_mdicnfg_82580(sc);
   5093 
   5094 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5095 		wm_pll_workaround_i210(sc);
   5096 
   5097 	if (sc->sc_type == WM_T_80003) {
   5098 		/* Default to TRUE to enable the MDIC W/A */
   5099 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5100 
   5101 		rv = wm_kmrn_readreg(sc,
   5102 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5103 		if (rv == 0) {
   5104 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5105 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5106 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5107 			else
   5108 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5109 		}
   5110 	}
   5111 }
   5112 
   5113 /*
   5114  * wm_add_rxbuf:
   5115  *
    5116  *	Add a receive buffer to the indicated descriptor.
   5117  */
   5118 static int
   5119 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5120 {
   5121 	struct wm_softc *sc = rxq->rxq_sc;
   5122 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5123 	struct mbuf *m;
   5124 	int error;
   5125 
   5126 	KASSERT(mutex_owned(rxq->rxq_lock));
   5127 
   5128 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5129 	if (m == NULL)
   5130 		return ENOBUFS;
   5131 
   5132 	MCLGET(m, M_DONTWAIT);
   5133 	if ((m->m_flags & M_EXT) == 0) {
   5134 		m_freem(m);
   5135 		return ENOBUFS;
   5136 	}
   5137 
   5138 	if (rxs->rxs_mbuf != NULL)
   5139 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5140 
   5141 	rxs->rxs_mbuf = m;
   5142 
   5143 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5144 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5145 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5146 	if (error) {
   5147 		/* XXX XXX XXX */
   5148 		aprint_error_dev(sc->sc_dev,
   5149 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5150 		panic("wm_add_rxbuf");
   5151 	}
   5152 
   5153 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5154 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5155 
   5156 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5157 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5158 			wm_init_rxdesc(rxq, idx);
   5159 	} else
   5160 		wm_init_rxdesc(rxq, idx);
   5161 
   5162 	return 0;
   5163 }
   5164 
   5165 /*
   5166  * wm_rxdrain:
   5167  *
   5168  *	Drain the receive queue.
   5169  */
   5170 static void
   5171 wm_rxdrain(struct wm_rxqueue *rxq)
   5172 {
   5173 	struct wm_softc *sc = rxq->rxq_sc;
   5174 	struct wm_rxsoft *rxs;
   5175 	int i;
   5176 
   5177 	KASSERT(mutex_owned(rxq->rxq_lock));
   5178 
   5179 	for (i = 0; i < WM_NRXDESC; i++) {
   5180 		rxs = &rxq->rxq_soft[i];
   5181 		if (rxs->rxs_mbuf != NULL) {
   5182 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5183 			m_freem(rxs->rxs_mbuf);
   5184 			rxs->rxs_mbuf = NULL;
   5185 		}
   5186 	}
   5187 }
   5188 
   5189 /*
    5190  * Set up registers for RSS.
    5191  *
    5192  * XXX No VMDq support yet.
   5193  */
   5194 static void
   5195 wm_init_rss(struct wm_softc *sc)
   5196 {
   5197 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5198 	int i;
   5199 
   5200 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5201 
   5202 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5203 		unsigned int qid, reta_ent;
   5204 
   5205 		qid  = i % sc->sc_nqueues;
   5206 		switch (sc->sc_type) {
   5207 		case WM_T_82574:
   5208 			reta_ent = __SHIFTIN(qid,
   5209 			    RETA_ENT_QINDEX_MASK_82574);
   5210 			break;
   5211 		case WM_T_82575:
   5212 			reta_ent = __SHIFTIN(qid,
   5213 			    RETA_ENT_QINDEX1_MASK_82575);
   5214 			break;
   5215 		default:
   5216 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5217 			break;
   5218 		}
   5219 
   5220 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5221 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5222 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5223 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5224 	}
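         	/*
         	 * For example, with sc_nqueues == 4 the RETA entries are
         	 * written 0, 1, 2, 3, 0, 1, ... so that received flows are
         	 * spread round-robin across the four queues.
         	 */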
   5225 
   5226 	rss_getkey((uint8_t *)rss_key);
   5227 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5228 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5229 
   5230 	if (sc->sc_type == WM_T_82574)
   5231 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5232 	else
   5233 		mrqc = MRQC_ENABLE_RSS_MQ;
   5234 
   5235 	/*
   5236 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5237 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5238 	 */
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5241 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5242 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5243 
   5244 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5245 }
   5246 
   5247 /*
    5248  * Adjust the TX and RX queue numbers which the system actually uses.
    5249  *
    5250  * The numbers are affected by the following parameters:
    5251  *     - The number of hardware queues
   5252  *     - The number of MSI-X vectors (= "nvectors" argument)
   5253  *     - ncpu
   5254  */
   5255 static void
   5256 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5257 {
   5258 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5259 
   5260 	if (nvectors < 2) {
   5261 		sc->sc_nqueues = 1;
   5262 		return;
   5263 	}
   5264 
   5265 	switch (sc->sc_type) {
   5266 	case WM_T_82572:
   5267 		hw_ntxqueues = 2;
   5268 		hw_nrxqueues = 2;
   5269 		break;
   5270 	case WM_T_82574:
   5271 		hw_ntxqueues = 2;
   5272 		hw_nrxqueues = 2;
   5273 		break;
   5274 	case WM_T_82575:
   5275 		hw_ntxqueues = 4;
   5276 		hw_nrxqueues = 4;
   5277 		break;
   5278 	case WM_T_82576:
   5279 		hw_ntxqueues = 16;
   5280 		hw_nrxqueues = 16;
   5281 		break;
   5282 	case WM_T_82580:
   5283 	case WM_T_I350:
   5284 	case WM_T_I354:
   5285 		hw_ntxqueues = 8;
   5286 		hw_nrxqueues = 8;
   5287 		break;
   5288 	case WM_T_I210:
   5289 		hw_ntxqueues = 4;
   5290 		hw_nrxqueues = 4;
   5291 		break;
   5292 	case WM_T_I211:
   5293 		hw_ntxqueues = 2;
   5294 		hw_nrxqueues = 2;
   5295 		break;
   5296 		/*
    5297 		 * As the following ethernet controllers do not support
    5298 		 * MSI-X, this driver does not use multiqueue on them:
   5299 		 *     - WM_T_80003
   5300 		 *     - WM_T_ICH8
   5301 		 *     - WM_T_ICH9
   5302 		 *     - WM_T_ICH10
   5303 		 *     - WM_T_PCH
   5304 		 *     - WM_T_PCH2
   5305 		 *     - WM_T_PCH_LPT
   5306 		 */
   5307 	default:
   5308 		hw_ntxqueues = 1;
   5309 		hw_nrxqueues = 1;
   5310 		break;
   5311 	}
   5312 
   5313 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5314 
   5315 	/*
    5316 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5317 	 * limit the number of queues actually used.
   5318 	 */
   5319 	if (nvectors < hw_nqueues + 1)
   5320 		sc->sc_nqueues = nvectors - 1;
   5321 	else
   5322 		sc->sc_nqueues = hw_nqueues;
   5323 
   5324 	/*
    5325 	 * As more queues than CPUs cannot improve scaling, we limit
    5326 	 * the number of queues actually used.
   5327 	 */
   5328 	if (ncpu < sc->sc_nqueues)
   5329 		sc->sc_nqueues = ncpu;
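         	/*
         	 * Worked example: an 82576 (16 hardware queues) given 5 MSI-X
         	 * vectors on an 8-CPU system ends up with
         	 * sc_nqueues = min(16, 5 - 1, 8) = 4.
         	 */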
   5330 }
   5331 
   5332 static inline bool
   5333 wm_is_using_msix(struct wm_softc *sc)
   5334 {
   5335 
   5336 	return (sc->sc_nintrs > 1);
   5337 }
   5338 
   5339 static inline bool
   5340 wm_is_using_multiqueue(struct wm_softc *sc)
   5341 {
   5342 
   5343 	return (sc->sc_nqueues > 1);
   5344 }
   5345 
   5346 static int
   5347 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5348 {
   5349 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5350 	wmq->wmq_id = qidx;
   5351 	wmq->wmq_intr_idx = intr_idx;
   5352 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5353 #ifdef WM_MPSAFE
   5354 	    | SOFTINT_MPSAFE
   5355 #endif
   5356 	    , wm_handle_queue, wmq);
   5357 	if (wmq->wmq_si != NULL)
   5358 		return 0;
   5359 
   5360 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5361 	    wmq->wmq_id);
   5362 
   5363 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5364 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5365 	return ENOMEM;
   5366 }
   5367 
   5368 /*
   5369  * Both single interrupt MSI and INTx can use this function.
   5370  */
   5371 static int
   5372 wm_setup_legacy(struct wm_softc *sc)
   5373 {
   5374 	pci_chipset_tag_t pc = sc->sc_pc;
   5375 	const char *intrstr = NULL;
   5376 	char intrbuf[PCI_INTRSTR_LEN];
   5377 	int error;
   5378 
   5379 	error = wm_alloc_txrx_queues(sc);
   5380 	if (error) {
   5381 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5382 		    error);
   5383 		return ENOMEM;
   5384 	}
   5385 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5386 	    sizeof(intrbuf));
   5387 #ifdef WM_MPSAFE
   5388 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5389 #endif
   5390 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5391 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5392 	if (sc->sc_ihs[0] == NULL) {
   5393 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5394 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5395 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5396 		return ENOMEM;
   5397 	}
   5398 
   5399 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5400 	sc->sc_nintrs = 1;
   5401 
   5402 	return wm_softint_establish(sc, 0, 0);
   5403 }
   5404 
   5405 static int
   5406 wm_setup_msix(struct wm_softc *sc)
   5407 {
   5408 	void *vih;
   5409 	kcpuset_t *affinity;
   5410 	int qidx, error, intr_idx, txrx_established;
   5411 	pci_chipset_tag_t pc = sc->sc_pc;
   5412 	const char *intrstr = NULL;
   5413 	char intrbuf[PCI_INTRSTR_LEN];
   5414 	char intr_xname[INTRDEVNAMEBUF];
   5415 
   5416 	if (sc->sc_nqueues < ncpu) {
   5417 		/*
   5418 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5419 		 * interrupts starts from CPU#1.
   5420 		 */
   5421 		sc->sc_affinity_offset = 1;
   5422 	} else {
   5423 		/*
    5424 		 * In this case, this device uses all CPUs. So, we unify the
    5425 		 * affinity cpu_index with the MSI-X vector number for readability.
   5426 		 */
   5427 		sc->sc_affinity_offset = 0;
   5428 	}
   5429 
   5430 	error = wm_alloc_txrx_queues(sc);
   5431 	if (error) {
   5432 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5433 		    error);
   5434 		return ENOMEM;
   5435 	}
   5436 
   5437 	kcpuset_create(&affinity, false);
   5438 	intr_idx = 0;
   5439 
   5440 	/*
   5441 	 * TX and RX
   5442 	 */
   5443 	txrx_established = 0;
   5444 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5445 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5446 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5447 
   5448 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5449 		    sizeof(intrbuf));
   5450 #ifdef WM_MPSAFE
   5451 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5452 		    PCI_INTR_MPSAFE, true);
   5453 #endif
   5454 		memset(intr_xname, 0, sizeof(intr_xname));
   5455 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5456 		    device_xname(sc->sc_dev), qidx);
   5457 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5458 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5459 		if (vih == NULL) {
   5460 			aprint_error_dev(sc->sc_dev,
   5461 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5462 			    intrstr ? " at " : "",
   5463 			    intrstr ? intrstr : "");
   5464 
   5465 			goto fail;
   5466 		}
   5467 		kcpuset_zero(affinity);
   5468 		/* Round-robin affinity */
   5469 		kcpuset_set(affinity, affinity_to);
   5470 		error = interrupt_distribute(vih, affinity, NULL);
   5471 		if (error == 0) {
   5472 			aprint_normal_dev(sc->sc_dev,
   5473 			    "for TX and RX interrupting at %s affinity to %u\n",
   5474 			    intrstr, affinity_to);
   5475 		} else {
   5476 			aprint_normal_dev(sc->sc_dev,
   5477 			    "for TX and RX interrupting at %s\n", intrstr);
   5478 		}
   5479 		sc->sc_ihs[intr_idx] = vih;
   5480 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5481 			goto fail;
   5482 		txrx_established++;
   5483 		intr_idx++;
   5484 	}
   5485 
   5486 	/* LINK */
   5487 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5488 	    sizeof(intrbuf));
   5489 #ifdef WM_MPSAFE
   5490 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5491 #endif
   5492 	memset(intr_xname, 0, sizeof(intr_xname));
   5493 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5494 	    device_xname(sc->sc_dev));
   5495 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5496 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5497 	if (vih == NULL) {
   5498 		aprint_error_dev(sc->sc_dev,
   5499 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5500 		    intrstr ? " at " : "",
   5501 		    intrstr ? intrstr : "");
   5502 
   5503 		goto fail;
   5504 	}
   5505 	/* Keep default affinity to LINK interrupt */
   5506 	aprint_normal_dev(sc->sc_dev,
   5507 	    "for LINK interrupting at %s\n", intrstr);
   5508 	sc->sc_ihs[intr_idx] = vih;
   5509 	sc->sc_link_intr_idx = intr_idx;
   5510 
   5511 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5512 	kcpuset_destroy(affinity);
   5513 	return 0;
   5514 
   5515  fail:
   5516 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5517 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5518 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5519 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5520 	}
   5521 
   5522 	kcpuset_destroy(affinity);
   5523 	return ENOMEM;
   5524 }
   5525 
   5526 static void
   5527 wm_unset_stopping_flags(struct wm_softc *sc)
   5528 {
   5529 	int i;
   5530 
   5531 	KASSERT(WM_CORE_LOCKED(sc));
   5532 
   5533 	/* Must unset stopping flags in ascending order. */
   5534 	for (i = 0; i < sc->sc_nqueues; i++) {
   5535 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5536 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5537 
   5538 		mutex_enter(txq->txq_lock);
   5539 		txq->txq_stopping = false;
   5540 		mutex_exit(txq->txq_lock);
   5541 
   5542 		mutex_enter(rxq->rxq_lock);
   5543 		rxq->rxq_stopping = false;
   5544 		mutex_exit(rxq->rxq_lock);
   5545 	}
   5546 
   5547 	sc->sc_core_stopping = false;
   5548 }
   5549 
   5550 static void
   5551 wm_set_stopping_flags(struct wm_softc *sc)
   5552 {
   5553 	int i;
   5554 
   5555 	KASSERT(WM_CORE_LOCKED(sc));
   5556 
   5557 	sc->sc_core_stopping = true;
   5558 
   5559 	/* Must set stopping flags in ascending order. */
   5560 	for (i = 0; i < sc->sc_nqueues; i++) {
   5561 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5562 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5563 
   5564 		mutex_enter(rxq->rxq_lock);
   5565 		rxq->rxq_stopping = true;
   5566 		mutex_exit(rxq->rxq_lock);
   5567 
   5568 		mutex_enter(txq->txq_lock);
   5569 		txq->txq_stopping = true;
   5570 		mutex_exit(txq->txq_lock);
   5571 	}
   5572 }
   5573 
   5574 /*
    5575  * Write the interrupt interval value to ITR or EITR
   5576  */
   5577 static void
   5578 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5579 {
   5580 
   5581 	if (!wmq->wmq_set_itr)
   5582 		return;
   5583 
   5584 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5585 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5586 
   5587 		/*
    5588 		 * The 82575 doesn't have the CNT_INGR field,
    5589 		 * so overwrite the counter field in software.
   5590 		 */
   5591 		if (sc->sc_type == WM_T_82575)
   5592 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5593 		else
   5594 			eitr |= EITR_CNT_INGR;
   5595 
   5596 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5597 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5598 		/*
    5599 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5600 		 * the multiqueue function with MSI-X.
   5601 		 */
   5602 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5603 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5604 	} else {
   5605 		KASSERT(wmq->wmq_id == 0);
   5606 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5607 	}
   5608 
   5609 	wmq->wmq_set_itr = false;
   5610 }
   5611 
   5612 /*
   5613  * TODO
    5614  * The dynamic ITR calculation below is almost the same as Linux's igb;
    5615  * however, it does not fit wm(4) well, so we keep AIM disabled
    5616  * until we find an appropriate ITR calculation.
   5617  */
   5618 /*
    5619  * Calculate the interrupt interval value to be written to the register
    5620  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5621  */
   5622 static void
   5623 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5624 {
   5625 #ifdef NOTYET
   5626 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5627 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5628 	uint32_t avg_size = 0;
   5629 	uint32_t new_itr;
   5630 
   5631 	if (rxq->rxq_packets)
   5632 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5633 	if (txq->txq_packets)
   5634 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5635 
   5636 	if (avg_size == 0) {
   5637 		new_itr = 450; /* restore default value */
   5638 		goto out;
   5639 	}
   5640 
   5641 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5642 	avg_size += 24;
   5643 
   5644 	/* Don't starve jumbo frames */
   5645 	avg_size = uimin(avg_size, 3000);
   5646 
   5647 	/* Give a little boost to mid-size frames */
   5648 	if ((avg_size > 300) && (avg_size < 1200))
   5649 		new_itr = avg_size / 3;
   5650 	else
   5651 		new_itr = avg_size / 2;
   5652 
   5653 out:
   5654 	/*
    5655 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5656 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5657 	 */
   5658 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5659 		new_itr *= 4;
   5660 
   5661 	if (new_itr != wmq->wmq_itr) {
   5662 		wmq->wmq_itr = new_itr;
   5663 		wmq->wmq_set_itr = true;
   5664 	} else
   5665 		wmq->wmq_set_itr = false;
   5666 
   5667 	rxq->rxq_packets = 0;
   5668 	rxq->rxq_bytes = 0;
   5669 	txq->txq_packets = 0;
   5670 	txq->txq_bytes = 0;
   5671 #endif
   5672 }
   5673 
   5674 /*
   5675  * wm_init:		[ifnet interface function]
   5676  *
   5677  *	Initialize the interface.
   5678  */
   5679 static int
   5680 wm_init(struct ifnet *ifp)
   5681 {
   5682 	struct wm_softc *sc = ifp->if_softc;
   5683 	int ret;
   5684 
   5685 	WM_CORE_LOCK(sc);
   5686 	ret = wm_init_locked(ifp);
   5687 	WM_CORE_UNLOCK(sc);
   5688 
   5689 	return ret;
   5690 }
   5691 
   5692 static int
   5693 wm_init_locked(struct ifnet *ifp)
   5694 {
   5695 	struct wm_softc *sc = ifp->if_softc;
   5696 	struct ethercom *ec = &sc->sc_ethercom;
   5697 	int i, j, trynum, error = 0;
   5698 	uint32_t reg;
   5699 
   5700 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5701 		device_xname(sc->sc_dev), __func__));
   5702 	KASSERT(WM_CORE_LOCKED(sc));
   5703 
   5704 	/*
    5705 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5706 	 * There is a small but measurable benefit to avoiding the adjustment
    5707 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5708 	 * on such platforms.  One possibility is that the DMA itself is
   5709 	 * slightly more efficient if the front of the entire packet (instead
   5710 	 * of the front of the headers) is aligned.
   5711 	 *
   5712 	 * Note we must always set align_tweak to 0 if we are using
   5713 	 * jumbo frames.
   5714 	 */
   5715 #ifdef __NO_STRICT_ALIGNMENT
   5716 	sc->sc_align_tweak = 0;
   5717 #else
   5718 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5719 		sc->sc_align_tweak = 0;
   5720 	else
   5721 		sc->sc_align_tweak = 2;
   5722 #endif /* __NO_STRICT_ALIGNMENT */
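         	/*
         	 * E.g. a 2-byte tweak shifts the 14-byte Ethernet header so
         	 * that the IP header which follows it starts on a 4-byte
         	 * boundary.
         	 */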
   5723 
   5724 	/* Cancel any pending I/O. */
   5725 	wm_stop_locked(ifp, 0);
   5726 
   5727 	/* Update statistics before reset */
   5728 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5729 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5730 
   5731 	/* PCH_SPT hardware workaround */
   5732 	if (sc->sc_type == WM_T_PCH_SPT)
   5733 		wm_flush_desc_rings(sc);
   5734 
   5735 	/* Reset the chip to a known state. */
   5736 	wm_reset(sc);
   5737 
   5738 	/*
   5739 	 * AMT based hardware can now take control from firmware
   5740 	 * Do this after reset.
   5741 	 */
   5742 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5743 		wm_get_hw_control(sc);
   5744 
   5745 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5746 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5747 		wm_legacy_irq_quirk_spt(sc);
   5748 
   5749 	/* Init hardware bits */
   5750 	wm_initialize_hardware_bits(sc);
   5751 
   5752 	/* Reset the PHY. */
   5753 	if (sc->sc_flags & WM_F_HAS_MII)
   5754 		wm_gmii_reset(sc);
   5755 
   5756 	if (sc->sc_type >= WM_T_ICH8) {
   5757 		reg = CSR_READ(sc, WMREG_GCR);
   5758 		/*
   5759 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5760 		 * default after reset.
   5761 		 */
   5762 		if (sc->sc_type == WM_T_ICH8)
   5763 			reg |= GCR_NO_SNOOP_ALL;
   5764 		else
   5765 			reg &= ~GCR_NO_SNOOP_ALL;
   5766 		CSR_WRITE(sc, WMREG_GCR, reg);
   5767 	}
   5768 	if ((sc->sc_type >= WM_T_ICH8)
   5769 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5770 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5771 
   5772 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5773 		reg |= CTRL_EXT_RO_DIS;
   5774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5775 	}
   5776 
   5777 	/* Calculate (E)ITR value */
   5778 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5779 		/*
   5780 		 * For NEWQUEUE's EITR (except for 82575).
    5781 		 * The 82575's EITR should be set to the same throttling
    5782 		 * value as the other old controllers' ITR because the
    5783 		 * interrupt/sec calculation is the same, that is,
    5784 		 * 1,000,000,000 / (N * 256).
    5785 		 *
    5786 		 * The 82574's EITR should be set to the same throttling
    5787 		 * value as ITR. For N interrupts/sec, set this value to
    5788 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5789 		 */
   5790 		sc->sc_itr_init = 450;
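         		/* I.e. roughly 1,000,000 / 450 =~ 2222 interrupts/sec. */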
   5791 	} else if (sc->sc_type >= WM_T_82543) {
   5792 		/*
   5793 		 * Set up the interrupt throttling register (units of 256ns)
   5794 		 * Note that a footnote in Intel's documentation says this
   5795 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5796 		 * or 10Mbit mode.  Empirically, it appears to be the case
    5797 	 * that this is also true for the 1024ns units of the other
   5798 		 * interrupt-related timer registers -- so, really, we ought
   5799 		 * to divide this value by 4 when the link speed is low.
   5800 		 *
   5801 		 * XXX implement this division at link speed change!
   5802 		 */
   5803 
   5804 		/*
   5805 		 * For N interrupts/sec, set this value to:
   5806 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5807 		 * absolute and packet timer values to this value
   5808 		 * divided by 4 to get "simple timer" behavior.
   5809 		 */
   5810 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
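         		/* I.e. 1,000,000,000 / (1500 * 256) =~ 2604. */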
   5811 	}
   5812 
   5813 	error = wm_init_txrx_queues(sc);
   5814 	if (error)
   5815 		goto out;
   5816 
   5817 	/* Clear out the VLAN table -- we don't use it (yet). */
   5818 	CSR_WRITE(sc, WMREG_VET, 0);
   5819 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5820 		trynum = 10; /* Due to hw errata */
   5821 	else
   5822 		trynum = 1;
   5823 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5824 		for (j = 0; j < trynum; j++)
   5825 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5826 
   5827 	/*
   5828 	 * Set up flow-control parameters.
   5829 	 *
   5830 	 * XXX Values could probably stand some tuning.
   5831 	 */
   5832 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5833 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5834 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5835 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5836 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5837 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5838 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5839 	}
   5840 
   5841 	sc->sc_fcrtl = FCRTL_DFLT;
   5842 	if (sc->sc_type < WM_T_82543) {
   5843 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5844 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5845 	} else {
   5846 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5847 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5848 	}
   5849 
   5850 	if (sc->sc_type == WM_T_80003)
   5851 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5852 	else
   5853 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5854 
   5855 	/* Writes the control register. */
   5856 	wm_set_vlan(sc);
   5857 
   5858 	if (sc->sc_flags & WM_F_HAS_MII) {
   5859 		uint16_t kmreg;
   5860 
   5861 		switch (sc->sc_type) {
   5862 		case WM_T_80003:
   5863 		case WM_T_ICH8:
   5864 		case WM_T_ICH9:
   5865 		case WM_T_ICH10:
   5866 		case WM_T_PCH:
   5867 		case WM_T_PCH2:
   5868 		case WM_T_PCH_LPT:
   5869 		case WM_T_PCH_SPT:
   5870 		case WM_T_PCH_CNP:
   5871 			/*
    5872 			 * Set the MAC to wait the maximum time between each
    5873 			 * iteration and increase the maximum number of
    5874 			 * iterations when polling the PHY; this fixes
    5875 			 * erroneous timeouts at 10Mbps.
   5876 			 */
   5877 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5878 			    0xFFFF);
   5879 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5880 			    &kmreg);
   5881 			kmreg |= 0x3F;
   5882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5883 			    kmreg);
   5884 			break;
   5885 		default:
   5886 			break;
   5887 		}
   5888 
   5889 		if (sc->sc_type == WM_T_80003) {
   5890 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5891 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5892 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5893 
   5894 			/* Bypass RX and TX FIFO's */
   5895 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5896 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5897 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5898 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5899 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5900 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5901 		}
   5902 	}
   5903 #if 0
   5904 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5905 #endif
   5906 
   5907 	/* Set up checksum offload parameters. */
   5908 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5909 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5910 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5911 		reg |= RXCSUM_IPOFL;
   5912 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5913 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5914 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5915 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5916 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5917 
   5918 	/* Set registers about MSI-X */
   5919 	if (wm_is_using_msix(sc)) {
   5920 		uint32_t ivar, qintr_idx;
   5921 		struct wm_queue *wmq;
   5922 		unsigned int qid;
   5923 
   5924 		if (sc->sc_type == WM_T_82575) {
   5925 			/* Interrupt control */
   5926 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5927 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5928 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5929 
   5930 			/* TX and RX */
   5931 			for (i = 0; i < sc->sc_nqueues; i++) {
   5932 				wmq = &sc->sc_queue[i];
   5933 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5934 				    EITR_TX_QUEUE(wmq->wmq_id)
   5935 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5936 			}
   5937 			/* Link status */
   5938 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5939 			    EITR_OTHER);
   5940 		} else if (sc->sc_type == WM_T_82574) {
   5941 			/* Interrupt control */
   5942 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5943 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5944 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5945 
   5946 			/*
    5947 			 * Work around an issue with spurious interrupts
    5948 			 * in MSI-X mode.
    5949 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5950 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5951 			 */
   5952 			reg = CSR_READ(sc, WMREG_RFCTL);
   5953 			reg |= WMREG_RFCTL_ACKDIS;
   5954 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5955 
   5956 			ivar = 0;
   5957 			/* TX and RX */
   5958 			for (i = 0; i < sc->sc_nqueues; i++) {
   5959 				wmq = &sc->sc_queue[i];
   5960 				qid = wmq->wmq_id;
   5961 				qintr_idx = wmq->wmq_intr_idx;
   5962 
   5963 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5964 				    IVAR_TX_MASK_Q_82574(qid));
   5965 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5966 				    IVAR_RX_MASK_Q_82574(qid));
   5967 			}
   5968 			/* Link status */
   5969 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5970 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5971 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5972 		} else {
   5973 			/* Interrupt control */
   5974 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5975 			    | GPIE_EIAME | GPIE_PBA);
   5976 
   5977 			switch (sc->sc_type) {
   5978 			case WM_T_82580:
   5979 			case WM_T_I350:
   5980 			case WM_T_I354:
   5981 			case WM_T_I210:
   5982 			case WM_T_I211:
   5983 				/* TX and RX */
   5984 				for (i = 0; i < sc->sc_nqueues; i++) {
   5985 					wmq = &sc->sc_queue[i];
   5986 					qid = wmq->wmq_id;
   5987 					qintr_idx = wmq->wmq_intr_idx;
   5988 
   5989 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5990 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5991 					ivar |= __SHIFTIN((qintr_idx
   5992 						| IVAR_VALID),
   5993 					    IVAR_TX_MASK_Q(qid));
   5994 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5995 					ivar |= __SHIFTIN((qintr_idx
   5996 						| IVAR_VALID),
   5997 					    IVAR_RX_MASK_Q(qid));
   5998 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5999 				}
   6000 				break;
   6001 			case WM_T_82576:
   6002 				/* TX and RX */
   6003 				for (i = 0; i < sc->sc_nqueues; i++) {
   6004 					wmq = &sc->sc_queue[i];
   6005 					qid = wmq->wmq_id;
   6006 					qintr_idx = wmq->wmq_intr_idx;
   6007 
   6008 					ivar = CSR_READ(sc,
   6009 					    WMREG_IVAR_Q_82576(qid));
   6010 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6011 					ivar |= __SHIFTIN((qintr_idx
   6012 						| IVAR_VALID),
   6013 					    IVAR_TX_MASK_Q_82576(qid));
   6014 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6015 					ivar |= __SHIFTIN((qintr_idx
   6016 						| IVAR_VALID),
   6017 					    IVAR_RX_MASK_Q_82576(qid));
   6018 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6019 					    ivar);
   6020 				}
   6021 				break;
   6022 			default:
   6023 				break;
   6024 			}
   6025 
   6026 			/* Link status */
   6027 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6028 			    IVAR_MISC_OTHER);
   6029 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6030 		}
   6031 
   6032 		if (wm_is_using_multiqueue(sc)) {
   6033 			wm_init_rss(sc);
   6034 
   6035 			/*
    6036 			 * NOTE: Receive Full-Packet Checksum Offload
    6037 			 * is mutually exclusive with Multiqueue. However,
    6038 			 * this is not the same as TCP/IP checksums, which
    6039 			 * still work.
    6040 			 */
   6041 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6042 			reg |= RXCSUM_PCSD;
   6043 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6044 		}
   6045 	}
   6046 
   6047 	/* Set up the interrupt registers. */
   6048 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6049 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6050 	    ICR_RXO | ICR_RXT0;
   6051 	if (wm_is_using_msix(sc)) {
   6052 		uint32_t mask;
   6053 		struct wm_queue *wmq;
   6054 
   6055 		switch (sc->sc_type) {
   6056 		case WM_T_82574:
   6057 			mask = 0;
   6058 			for (i = 0; i < sc->sc_nqueues; i++) {
   6059 				wmq = &sc->sc_queue[i];
   6060 				mask |= ICR_TXQ(wmq->wmq_id);
   6061 				mask |= ICR_RXQ(wmq->wmq_id);
   6062 			}
   6063 			mask |= ICR_OTHER;
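         			/*
         			 * E.g. with two queues the mask is ICR_TXQ(0) |
         			 * ICR_RXQ(0) | ICR_TXQ(1) | ICR_RXQ(1) |
         			 * ICR_OTHER.
         			 */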
   6064 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6065 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6066 			break;
   6067 		default:
   6068 			if (sc->sc_type == WM_T_82575) {
   6069 				mask = 0;
   6070 				for (i = 0; i < sc->sc_nqueues; i++) {
   6071 					wmq = &sc->sc_queue[i];
   6072 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6073 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6074 				}
   6075 				mask |= EITR_OTHER;
   6076 			} else {
   6077 				mask = 0;
   6078 				for (i = 0; i < sc->sc_nqueues; i++) {
   6079 					wmq = &sc->sc_queue[i];
   6080 					mask |= 1 << wmq->wmq_intr_idx;
   6081 				}
   6082 				mask |= 1 << sc->sc_link_intr_idx;
   6083 			}
   6084 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6085 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6086 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6087 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6088 			break;
   6089 		}
   6090 	} else
   6091 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6092 
   6093 	/* Set up the inter-packet gap. */
   6094 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6095 
   6096 	if (sc->sc_type >= WM_T_82543) {
   6097 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6098 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6099 			wm_itrs_writereg(sc, wmq);
   6100 		}
   6101 		/*
    6102 		 * Link interrupts occur much less frequently than TX
    6103 		 * and RX interrupts, so we don't tune the
    6104 		 * EINTR(WM_MSIX_LINKINTR_IDX) value as
    6105 		 * FreeBSD's if_igb does.
   6106 		 */
   6107 	}
   6108 
   6109 	/* Set the VLAN ethernetype. */
   6110 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6111 
   6112 	/*
   6113 	 * Set up the transmit control register; we start out with
    6114 	 * a collision distance suitable for FDX, but update it when
   6115 	 * we resolve the media type.
   6116 	 */
   6117 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6118 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6119 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6120 	if (sc->sc_type >= WM_T_82571)
   6121 		sc->sc_tctl |= TCTL_MULR;
   6122 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6123 
   6124 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6125 		/* Write TDT after TCTL.EN is set. See the document. */
   6126 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6127 	}
   6128 
   6129 	if (sc->sc_type == WM_T_80003) {
   6130 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6131 		reg &= ~TCTL_EXT_GCEX_MASK;
   6132 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6133 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6134 	}
   6135 
   6136 	/* Set the media. */
   6137 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6138 		goto out;
   6139 
   6140 	/* Configure for OS presence */
   6141 	wm_init_manageability(sc);
   6142 
   6143 	/*
   6144 	 * Set up the receive control register; we actually program the
   6145 	 * register when we set the receive filter. Use multicast address
   6146 	 * offset type 0.
   6147 	 *
   6148 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6149 	 * don't enable that feature.
   6150 	 */
   6151 	sc->sc_mchash_type = 0;
   6152 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6153 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6154 
   6155 	/* 82574 use one buffer extended Rx descriptor. */
   6156 	if (sc->sc_type == WM_T_82574)
   6157 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6158 
   6159 	/*
   6160 	 * The I350 has a bug where it always strips the CRC whether
    6161 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6162 	 */
   6163 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6164 	    || (sc->sc_type == WM_T_I210))
   6165 		sc->sc_rctl |= RCTL_SECRC;
   6166 
   6167 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6168 	    && (ifp->if_mtu > ETHERMTU)) {
   6169 		sc->sc_rctl |= RCTL_LPE;
   6170 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6171 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6172 	}
   6173 
   6174 	if (MCLBYTES == 2048)
   6175 		sc->sc_rctl |= RCTL_2k;
   6176 	else {
   6177 		if (sc->sc_type >= WM_T_82543) {
   6178 			switch (MCLBYTES) {
   6179 			case 4096:
   6180 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6181 				break;
   6182 			case 8192:
   6183 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6184 				break;
   6185 			case 16384:
   6186 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6187 				break;
   6188 			default:
   6189 				panic("wm_init: MCLBYTES %d unsupported",
   6190 				    MCLBYTES);
   6191 				break;
   6192 			}
		} else {
			panic("wm_init: i82542 requires MCLBYTES = 2048");
		}
   6195 	}
   6196 
   6197 	/* Enable ECC */
   6198 	switch (sc->sc_type) {
   6199 	case WM_T_82571:
   6200 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6201 		reg |= PBA_ECC_CORR_EN;
   6202 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6203 		break;
   6204 	case WM_T_PCH_LPT:
   6205 	case WM_T_PCH_SPT:
   6206 	case WM_T_PCH_CNP:
   6207 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6208 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6209 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6210 
   6211 		sc->sc_ctrl |= CTRL_MEHE;
   6212 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6213 		break;
   6214 	default:
   6215 		break;
   6216 	}
   6217 
   6218 	/*
   6219 	 * Set the receive filter.
   6220 	 *
   6221 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6222 	 * the setting of RCTL.EN in wm_set_filter()
   6223 	 */
   6224 	wm_set_filter(sc);
   6225 
	/* On 82575 and later, set RDT only after RX is enabled. */
   6227 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6228 		int qidx;
   6229 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6230 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6231 			for (i = 0; i < WM_NRXDESC; i++) {
   6232 				mutex_enter(rxq->rxq_lock);
   6233 				wm_init_rxdesc(rxq, i);
   6234 				mutex_exit(rxq->rxq_lock);
   6236 			}
   6237 		}
   6238 	}
   6239 
   6240 	wm_unset_stopping_flags(sc);
   6241 
   6242 	/* Start the one second link check clock. */
   6243 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6244 
   6245 	/* ...all done! */
   6246 	ifp->if_flags |= IFF_RUNNING;
   6247 	ifp->if_flags &= ~IFF_OACTIVE;
   6248 
   6249  out:
   6250 	/* Save last flags for the callback */
   6251 	sc->sc_if_flags = ifp->if_flags;
   6252 	sc->sc_ec_capenable = ec->ec_capenable;
   6253 	if (error)
   6254 		log(LOG_ERR, "%s: interface not running\n",
   6255 		    device_xname(sc->sc_dev));
   6256 	return error;
   6257 }
   6258 
   6259 /*
   6260  * wm_stop:		[ifnet interface function]
   6261  *
   6262  *	Stop transmission on the interface.
   6263  */
   6264 static void
   6265 wm_stop(struct ifnet *ifp, int disable)
   6266 {
   6267 	struct wm_softc *sc = ifp->if_softc;
   6268 
   6269 	WM_CORE_LOCK(sc);
   6270 	wm_stop_locked(ifp, disable);
   6271 	WM_CORE_UNLOCK(sc);
   6272 }
   6273 
   6274 static void
   6275 wm_stop_locked(struct ifnet *ifp, int disable)
   6276 {
   6277 	struct wm_softc *sc = ifp->if_softc;
   6278 	struct wm_txsoft *txs;
   6279 	int i, qidx;
   6280 
   6281 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6282 		device_xname(sc->sc_dev), __func__));
   6283 	KASSERT(WM_CORE_LOCKED(sc));
   6284 
   6285 	wm_set_stopping_flags(sc);
   6286 
   6287 	/* Stop the one second clock. */
   6288 	callout_stop(&sc->sc_tick_ch);
   6289 
   6290 	/* Stop the 82547 Tx FIFO stall check timer. */
   6291 	if (sc->sc_type == WM_T_82547)
   6292 		callout_stop(&sc->sc_txfifo_ch);
   6293 
   6294 	if (sc->sc_flags & WM_F_HAS_MII) {
   6295 		/* Down the MII. */
   6296 		mii_down(&sc->sc_mii);
   6297 	} else {
   6298 #if 0
   6299 		/* Should we clear PHY's status properly? */
   6300 		wm_reset(sc);
   6301 #endif
   6302 	}
   6303 
   6304 	/* Stop the transmit and receive processes. */
   6305 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6306 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6307 	sc->sc_rctl &= ~RCTL_EN;
   6308 
   6309 	/*
   6310 	 * Clear the interrupt mask to ensure the device cannot assert its
   6311 	 * interrupt line.
   6312 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6313 	 * service any currently pending or shared interrupt.
   6314 	 */
   6315 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6316 	sc->sc_icr = 0;
   6317 	if (wm_is_using_msix(sc)) {
   6318 		if (sc->sc_type != WM_T_82574) {
   6319 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6320 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6321 		} else
   6322 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6323 	}
   6324 
   6325 	/* Release any queued transmit buffers. */
   6326 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6327 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6328 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6329 		mutex_enter(txq->txq_lock);
   6330 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6331 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6332 			txs = &txq->txq_soft[i];
   6333 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6335 				m_freem(txs->txs_mbuf);
   6336 				txs->txs_mbuf = NULL;
   6337 			}
   6338 		}
   6339 		mutex_exit(txq->txq_lock);
   6340 	}
   6341 
   6342 	/* Mark the interface as down and cancel the watchdog timer. */
   6343 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6344 
   6345 	if (disable) {
   6346 		for (i = 0; i < sc->sc_nqueues; i++) {
   6347 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6348 			mutex_enter(rxq->rxq_lock);
   6349 			wm_rxdrain(rxq);
   6350 			mutex_exit(rxq->rxq_lock);
   6351 		}
   6352 	}
   6353 
   6354 #if 0 /* notyet */
   6355 	if (sc->sc_type >= WM_T_82544)
   6356 		CSR_WRITE(sc, WMREG_WUC, 0);
   6357 #endif
   6358 }
   6359 
   6360 static void
   6361 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6362 {
   6363 	struct mbuf *m;
   6364 	int i;
   6365 
   6366 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6367 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6368 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6369 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6370 		    m->m_data, m->m_len, m->m_flags);
   6371 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6372 	    i, i == 1 ? "" : "s");
   6373 }
   6374 
   6375 /*
   6376  * wm_82547_txfifo_stall:
   6377  *
   6378  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6379  *	reset the FIFO pointers, and restart packet transmission.
   6380  */
   6381 static void
   6382 wm_82547_txfifo_stall(void *arg)
   6383 {
   6384 	struct wm_softc *sc = arg;
   6385 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6386 
   6387 	mutex_enter(txq->txq_lock);
   6388 
   6389 	if (txq->txq_stopping)
   6390 		goto out;
   6391 
   6392 	if (txq->txq_fifo_stall) {
   6393 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6394 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6395 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6396 			/*
   6397 			 * Packets have drained.  Stop transmitter, reset
   6398 			 * FIFO pointers, restart transmitter, and kick
   6399 			 * the packet queue.
   6400 			 */
   6401 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6402 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6403 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6404 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6405 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6406 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6407 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6408 			CSR_WRITE_FLUSH(sc);
   6409 
   6410 			txq->txq_fifo_head = 0;
   6411 			txq->txq_fifo_stall = 0;
   6412 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6413 		} else {
   6414 			/*
   6415 			 * Still waiting for packets to drain; try again in
   6416 			 * another tick.
   6417 			 */
   6418 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6419 		}
   6420 	}
   6421 
   6422 out:
   6423 	mutex_exit(txq->txq_lock);
   6424 }
   6425 
   6426 /*
   6427  * wm_82547_txfifo_bugchk:
   6428  *
   6429  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6430  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6432  *
   6433  *	We do this by checking the amount of space before the end
   6434  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6435  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6436  *	the internal FIFO pointers to the beginning, and restart
   6437  *	transmission on the interface.
   6438  */
   6439 #define	WM_FIFO_HDR		0x10
   6440 #define	WM_82547_PAD_LEN	0x3e0
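/*
 * Illustrative arithmetic: a 1514-byte frame costs
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes of FIFO space,
 * so it is sent only while more than 0x600 - WM_82547_PAD_LEN = 0x220
 * bytes remain before the end of the FIFO; otherwise we stall until the
 * FIFO drains.
 */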
   6441 static int
   6442 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6443 {
   6444 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6445 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6446 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6447 
   6448 	/* Just return if already stalled. */
   6449 	if (txq->txq_fifo_stall)
   6450 		return 1;
   6451 
   6452 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6453 		/* Stall only occurs in half-duplex mode. */
   6454 		goto send_packet;
   6455 	}
   6456 
   6457 	if (len >= WM_82547_PAD_LEN + space) {
   6458 		txq->txq_fifo_stall = 1;
   6459 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6460 		return 1;
   6461 	}
   6462 
   6463  send_packet:
   6464 	txq->txq_fifo_head += len;
   6465 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6466 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6467 
   6468 	return 0;
   6469 }
   6470 
   6471 static int
   6472 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6473 {
   6474 	int error;
   6475 
   6476 	/*
   6477 	 * Allocate the control data structures, and create and load the
   6478 	 * DMA map for it.
   6479 	 *
   6480 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6481 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6482 	 * both sets within the same 4G segment.
   6483 	 */
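	/*
	 * The 4G constraint is enforced via the boundary argument
	 * (0x100000000) passed to bus_dmamem_alloc() below.
	 */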
   6484 	if (sc->sc_type < WM_T_82544)
   6485 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6486 	else
   6487 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6488 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6489 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6490 	else
   6491 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6492 
   6493 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6494 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6495 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6496 		aprint_error_dev(sc->sc_dev,
   6497 		    "unable to allocate TX control data, error = %d\n",
   6498 		    error);
   6499 		goto fail_0;
   6500 	}
   6501 
   6502 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6503 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6504 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6505 		aprint_error_dev(sc->sc_dev,
   6506 		    "unable to map TX control data, error = %d\n", error);
   6507 		goto fail_1;
   6508 	}
   6509 
   6510 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6511 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6512 		aprint_error_dev(sc->sc_dev,
   6513 		    "unable to create TX control data DMA map, error = %d\n",
   6514 		    error);
   6515 		goto fail_2;
   6516 	}
   6517 
   6518 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6519 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6520 		aprint_error_dev(sc->sc_dev,
   6521 		    "unable to load TX control data DMA map, error = %d\n",
   6522 		    error);
   6523 		goto fail_3;
   6524 	}
   6525 
   6526 	return 0;
   6527 
   6528  fail_3:
   6529 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6530  fail_2:
   6531 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6532 	    WM_TXDESCS_SIZE(txq));
   6533  fail_1:
   6534 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6535  fail_0:
   6536 	return error;
   6537 }
   6538 
   6539 static void
   6540 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6541 {
   6542 
   6543 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6544 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6545 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6546 	    WM_TXDESCS_SIZE(txq));
   6547 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6548 }
   6549 
   6550 static int
   6551 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6552 {
   6553 	int error;
   6554 	size_t rxq_descs_size;
   6555 
   6556 	/*
   6557 	 * Allocate the control data structures, and create and load the
   6558 	 * DMA map for it.
   6559 	 *
   6560 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6561 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6562 	 * both sets within the same 4G segment.
   6563 	 */
   6564 	rxq->rxq_ndesc = WM_NRXDESC;
   6565 	if (sc->sc_type == WM_T_82574)
   6566 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6567 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6568 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6569 	else
   6570 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6571 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6572 
   6573 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6574 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6575 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6576 		aprint_error_dev(sc->sc_dev,
   6577 		    "unable to allocate RX control data, error = %d\n",
   6578 		    error);
   6579 		goto fail_0;
   6580 	}
   6581 
   6582 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6583 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6584 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6585 		aprint_error_dev(sc->sc_dev,
   6586 		    "unable to map RX control data, error = %d\n", error);
   6587 		goto fail_1;
   6588 	}
   6589 
   6590 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6591 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6592 		aprint_error_dev(sc->sc_dev,
   6593 		    "unable to create RX control data DMA map, error = %d\n",
   6594 		    error);
   6595 		goto fail_2;
   6596 	}
   6597 
   6598 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6599 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6600 		aprint_error_dev(sc->sc_dev,
   6601 		    "unable to load RX control data DMA map, error = %d\n",
   6602 		    error);
   6603 		goto fail_3;
   6604 	}
   6605 
   6606 	return 0;
   6607 
   6608  fail_3:
   6609 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6610  fail_2:
   6611 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6612 	    rxq_descs_size);
   6613  fail_1:
   6614 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6615  fail_0:
   6616 	return error;
   6617 }
   6618 
   6619 static void
   6620 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6621 {
   6622 
   6623 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6624 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6625 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6626 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6627 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6628 }
   6629 
   6630 
   6631 static int
   6632 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6633 {
   6634 	int i, error;
   6635 
   6636 	/* Create the transmit buffer DMA maps. */
   6637 	WM_TXQUEUELEN(txq) =
   6638 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6639 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6640 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6641 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6642 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6643 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6644 			aprint_error_dev(sc->sc_dev,
   6645 			    "unable to create Tx DMA map %d, error = %d\n",
   6646 			    i, error);
   6647 			goto fail;
   6648 		}
   6649 	}
   6650 
   6651 	return 0;
   6652 
   6653  fail:
   6654 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6655 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6656 			bus_dmamap_destroy(sc->sc_dmat,
   6657 			    txq->txq_soft[i].txs_dmamap);
   6658 	}
   6659 	return error;
   6660 }
   6661 
   6662 static void
   6663 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6664 {
   6665 	int i;
   6666 
   6667 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6668 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6669 			bus_dmamap_destroy(sc->sc_dmat,
   6670 			    txq->txq_soft[i].txs_dmamap);
   6671 	}
   6672 }
   6673 
   6674 static int
   6675 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6676 {
   6677 	int i, error;
   6678 
   6679 	/* Create the receive buffer DMA maps. */
   6680 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6681 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6682 			    MCLBYTES, 0, 0,
   6683 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6684 			aprint_error_dev(sc->sc_dev,
   6685 			    "unable to create Rx DMA map %d error = %d\n",
   6686 			    i, error);
   6687 			goto fail;
   6688 		}
   6689 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6690 	}
   6691 
   6692 	return 0;
   6693 
   6694  fail:
   6695 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6696 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6697 			bus_dmamap_destroy(sc->sc_dmat,
   6698 			    rxq->rxq_soft[i].rxs_dmamap);
   6699 	}
   6700 	return error;
   6701 }
   6702 
   6703 static void
   6704 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6705 {
   6706 	int i;
   6707 
   6708 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6709 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6710 			bus_dmamap_destroy(sc->sc_dmat,
   6711 			    rxq->rxq_soft[i].rxs_dmamap);
   6712 	}
   6713 }
   6714 
   6715 /*
 * wm_alloc_txrx_queues:
   6717  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6718  */
   6719 static int
   6720 wm_alloc_txrx_queues(struct wm_softc *sc)
   6721 {
   6722 	int i, error, tx_done, rx_done;
   6723 
   6724 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6725 	    KM_SLEEP);
   6726 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6728 		error = ENOMEM;
   6729 		goto fail_0;
   6730 	}
   6731 
   6732 	/* For transmission */
   6733 	error = 0;
   6734 	tx_done = 0;
   6735 	for (i = 0; i < sc->sc_nqueues; i++) {
   6736 #ifdef WM_EVENT_COUNTERS
   6737 		int j;
   6738 		const char *xname;
   6739 #endif
   6740 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6741 		txq->txq_sc = sc;
   6742 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6743 
   6744 		error = wm_alloc_tx_descs(sc, txq);
   6745 		if (error)
   6746 			break;
   6747 		error = wm_alloc_tx_buffer(sc, txq);
   6748 		if (error) {
   6749 			wm_free_tx_descs(sc, txq);
   6750 			break;
   6751 		}
   6752 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6753 		if (txq->txq_interq == NULL) {
   6754 			wm_free_tx_descs(sc, txq);
   6755 			wm_free_tx_buffer(sc, txq);
   6756 			error = ENOMEM;
   6757 			break;
   6758 		}
   6759 
   6760 #ifdef WM_EVENT_COUNTERS
   6761 		xname = device_xname(sc->sc_dev);
   6762 
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6766 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6767 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6772 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6773 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6774 
   6775 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6780 		}
   6781 
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6785 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6786 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6787 #endif /* WM_EVENT_COUNTERS */
   6788 
   6789 		tx_done++;
   6790 	}
   6791 	if (error)
   6792 		goto fail_1;
   6793 
   6794 	/* For receive */
   6795 	error = 0;
   6796 	rx_done = 0;
   6797 	for (i = 0; i < sc->sc_nqueues; i++) {
   6798 #ifdef WM_EVENT_COUNTERS
   6799 		const char *xname;
   6800 #endif
   6801 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6802 		rxq->rxq_sc = sc;
   6803 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6804 
   6805 		error = wm_alloc_rx_descs(sc, rxq);
   6806 		if (error)
   6807 			break;
   6808 
   6809 		error = wm_alloc_rx_buffer(sc, rxq);
   6810 		if (error) {
   6811 			wm_free_rx_descs(sc, rxq);
   6812 			break;
   6813 		}
   6814 
   6815 #ifdef WM_EVENT_COUNTERS
   6816 		xname = device_xname(sc->sc_dev);
   6817 
   6818 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6819 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6820 
   6821 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6822 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6823 #endif /* WM_EVENT_COUNTERS */
   6824 
   6825 		rx_done++;
   6826 	}
   6827 	if (error)
   6828 		goto fail_2;
   6829 
   6830 	return 0;
   6831 
   6832  fail_2:
   6833 	for (i = 0; i < rx_done; i++) {
   6834 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6835 		wm_free_rx_buffer(sc, rxq);
   6836 		wm_free_rx_descs(sc, rxq);
   6837 		if (rxq->rxq_lock)
   6838 			mutex_obj_free(rxq->rxq_lock);
   6839 	}
   6840  fail_1:
   6841 	for (i = 0; i < tx_done; i++) {
   6842 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6843 		pcq_destroy(txq->txq_interq);
   6844 		wm_free_tx_buffer(sc, txq);
   6845 		wm_free_tx_descs(sc, txq);
   6846 		if (txq->txq_lock)
   6847 			mutex_obj_free(txq->txq_lock);
   6848 	}
   6849 
   6850 	kmem_free(sc->sc_queue,
   6851 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6852  fail_0:
   6853 	return error;
   6854 }
   6855 
   6856 /*
 * wm_free_txrx_queues:
   6858  *	Free {tx,rx}descs and {tx,rx} buffers
   6859  */
   6860 static void
   6861 wm_free_txrx_queues(struct wm_softc *sc)
   6862 {
   6863 	int i;
   6864 
   6865 	for (i = 0; i < sc->sc_nqueues; i++) {
   6866 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6867 
   6868 #ifdef WM_EVENT_COUNTERS
   6869 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6870 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6871 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6872 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6873 #endif /* WM_EVENT_COUNTERS */
   6874 
   6875 		wm_free_rx_buffer(sc, rxq);
   6876 		wm_free_rx_descs(sc, rxq);
   6877 		if (rxq->rxq_lock)
   6878 			mutex_obj_free(rxq->rxq_lock);
   6879 	}
   6880 
   6881 	for (i = 0; i < sc->sc_nqueues; i++) {
   6882 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6883 		struct mbuf *m;
   6884 #ifdef WM_EVENT_COUNTERS
   6885 		int j;
   6886 
   6887 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6895 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6896 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6897 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6898 
   6899 		for (j = 0; j < WM_NTXSEGS; j++)
   6900 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6901 
   6902 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6907 #endif /* WM_EVENT_COUNTERS */
   6908 
   6909 		/* Drain txq_interq */
   6910 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6911 			m_freem(m);
   6912 		pcq_destroy(txq->txq_interq);
   6913 
   6914 		wm_free_tx_buffer(sc, txq);
   6915 		wm_free_tx_descs(sc, txq);
   6916 		if (txq->txq_lock)
   6917 			mutex_obj_free(txq->txq_lock);
   6918 	}
   6919 
   6920 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6921 }
   6922 
   6923 static void
   6924 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6925 {
   6926 
   6927 	KASSERT(mutex_owned(txq->txq_lock));
   6928 
   6929 	/* Initialize the transmit descriptor ring. */
   6930 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6931 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6932 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6933 	txq->txq_free = WM_NTXDESC(txq);
   6934 	txq->txq_next = 0;
   6935 }
   6936 
   6937 static void
   6938 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6939     struct wm_txqueue *txq)
   6940 {
   6941 
   6942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6943 		device_xname(sc->sc_dev), __func__));
   6944 	KASSERT(mutex_owned(txq->txq_lock));
   6945 
   6946 	if (sc->sc_type < WM_T_82543) {
   6947 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6948 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6949 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6950 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6951 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6952 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6953 	} else {
   6954 		int qid = wmq->wmq_id;
   6955 
   6956 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6957 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6958 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6959 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6960 
   6961 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6962 			/*
   6963 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   6965 			 */
   6966 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6967 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6968 			    | TXDCTL_WTHRESH(0));
   6969 		else {
   6970 			/* XXX should update with AIM? */
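			/*
			 * The division by 4 presumably converts the ITR
			 * value (256ns units) to the 1.024us granularity
			 * of the TIDV/TADV delay timers.
			 */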
   6971 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6972 			if (sc->sc_type >= WM_T_82540) {
   6973 				/* Should be the same */
   6974 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6975 			}
   6976 
   6977 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6978 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6979 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6980 		}
   6981 	}
   6982 }
   6983 
   6984 static void
   6985 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6986 {
   6987 	int i;
   6988 
   6989 	KASSERT(mutex_owned(txq->txq_lock));
   6990 
   6991 	/* Initialize the transmit job descriptors. */
   6992 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6993 		txq->txq_soft[i].txs_mbuf = NULL;
   6994 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6995 	txq->txq_snext = 0;
   6996 	txq->txq_sdirty = 0;
   6997 }
   6998 
   6999 static void
   7000 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7001     struct wm_txqueue *txq)
   7002 {
   7003 
   7004 	KASSERT(mutex_owned(txq->txq_lock));
   7005 
   7006 	/*
   7007 	 * Set up some register offsets that are different between
   7008 	 * the i82542 and the i82543 and later chips.
   7009 	 */
   7010 	if (sc->sc_type < WM_T_82543)
   7011 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7012 	else
   7013 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7014 
   7015 	wm_init_tx_descs(sc, txq);
   7016 	wm_init_tx_regs(sc, wmq, txq);
   7017 	wm_init_tx_buffer(sc, txq);
   7018 
   7019 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7020 	txq->txq_sending = false;
   7021 }
   7022 
   7023 static void
   7024 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7025     struct wm_rxqueue *rxq)
   7026 {
   7027 
   7028 	KASSERT(mutex_owned(rxq->rxq_lock));
   7029 
   7030 	/*
   7031 	 * Initialize the receive descriptor and receive job
   7032 	 * descriptor rings.
   7033 	 */
   7034 	if (sc->sc_type < WM_T_82543) {
   7035 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7036 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7037 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7038 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7042 
   7043 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7044 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7045 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7046 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7047 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7048 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7049 	} else {
   7050 		int qid = wmq->wmq_id;
   7051 
   7052 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7053 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7054 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7055 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7056 
   7057 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7058 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
   7060 
   7061 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
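			/*
			 * SRRCTL_BSIZEPKT is in units of
			 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes, so e.g. the
			 * common MCLBYTES of 2048 programs a value of 2.
			 */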
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7064 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7065 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7066 			    | RXDCTL_WTHRESH(1));
   7067 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7068 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7069 		} else {
   7070 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7071 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7072 			/* XXX should update with AIM? */
   7073 			CSR_WRITE(sc, WMREG_RDTR,
   7074 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
   7076 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7077 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7078 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7079 		}
   7080 	}
   7081 }
   7082 
   7083 static int
   7084 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7085 {
   7086 	struct wm_rxsoft *rxs;
   7087 	int error, i;
   7088 
   7089 	KASSERT(mutex_owned(rxq->rxq_lock));
   7090 
   7091 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7092 		rxs = &rxq->rxq_soft[i];
   7093 		if (rxs->rxs_mbuf == NULL) {
   7094 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7095 				log(LOG_ERR, "%s: unable to allocate or map "
   7096 				    "rx buffer %d, error = %d\n",
   7097 				    device_xname(sc->sc_dev), i, error);
   7098 				/*
   7099 				 * XXX Should attempt to run with fewer receive
   7100 				 * XXX buffers instead of just failing.
   7101 				 */
   7102 				wm_rxdrain(rxq);
   7103 				return ENOMEM;
   7104 			}
   7105 		} else {
   7106 			/*
   7107 			 * For 82575 and 82576, the RX descriptors must be
   7108 			 * initialized after the setting of RCTL.EN in
   7109 			 * wm_set_filter()
   7110 			 */
   7111 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7112 				wm_init_rxdesc(rxq, i);
   7113 		}
   7114 	}
   7115 	rxq->rxq_ptr = 0;
   7116 	rxq->rxq_discard = 0;
   7117 	WM_RXCHAIN_RESET(rxq);
   7118 
   7119 	return 0;
   7120 }
   7121 
   7122 static int
   7123 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7124     struct wm_rxqueue *rxq)
   7125 {
   7126 
   7127 	KASSERT(mutex_owned(rxq->rxq_lock));
   7128 
   7129 	/*
   7130 	 * Set up some register offsets that are different between
   7131 	 * the i82542 and the i82543 and later chips.
   7132 	 */
   7133 	if (sc->sc_type < WM_T_82543)
   7134 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7135 	else
   7136 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7137 
   7138 	wm_init_rx_regs(sc, wmq, rxq);
   7139 	return wm_init_rx_buffer(sc, rxq);
   7140 }
   7141 
   7142 /*
 * wm_init_txrx_queues:
   7144  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7145  */
   7146 static int
   7147 wm_init_txrx_queues(struct wm_softc *sc)
   7148 {
   7149 	int i, error = 0;
   7150 
   7151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7152 		device_xname(sc->sc_dev), __func__));
   7153 
   7154 	for (i = 0; i < sc->sc_nqueues; i++) {
   7155 		struct wm_queue *wmq = &sc->sc_queue[i];
   7156 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7157 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7158 
   7159 		/*
   7160 		 * TODO
		 * Currently, a constant value is used instead of AIM.
		 * Furthermore, the interrupt interval for the multiqueue
		 * (polling mode) case is set lower than the default value.
   7164 		 * More tuning and AIM are required.
   7165 		 */
   7166 		if (wm_is_using_multiqueue(sc))
   7167 			wmq->wmq_itr = 50;
   7168 		else
   7169 			wmq->wmq_itr = sc->sc_itr_init;
   7170 		wmq->wmq_set_itr = true;
   7171 
   7172 		mutex_enter(txq->txq_lock);
   7173 		wm_init_tx_queue(sc, wmq, txq);
   7174 		mutex_exit(txq->txq_lock);
   7175 
   7176 		mutex_enter(rxq->rxq_lock);
   7177 		error = wm_init_rx_queue(sc, wmq, rxq);
   7178 		mutex_exit(rxq->rxq_lock);
   7179 		if (error)
   7180 			break;
   7181 	}
   7182 
   7183 	return error;
   7184 }
   7185 
   7186 /*
   7187  * wm_tx_offload:
   7188  *
   7189  *	Set up TCP/IP checksumming parameters for the
   7190  *	specified packet.
   7191  */
   7192 static int
   7193 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7194     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7195 {
   7196 	struct mbuf *m0 = txs->txs_mbuf;
   7197 	struct livengood_tcpip_ctxdesc *t;
   7198 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7199 	uint32_t ipcse;
   7200 	struct ether_header *eh;
   7201 	int offset, iphl;
   7202 	uint8_t fields;
   7203 
   7204 	/*
   7205 	 * XXX It would be nice if the mbuf pkthdr had offset
   7206 	 * fields for the protocol headers.
   7207 	 */
   7208 
   7209 	eh = mtod(m0, struct ether_header *);
   7210 	switch (htons(eh->ether_type)) {
   7211 	case ETHERTYPE_IP:
   7212 	case ETHERTYPE_IPV6:
   7213 		offset = ETHER_HDR_LEN;
   7214 		break;
   7215 
   7216 	case ETHERTYPE_VLAN:
   7217 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7218 		break;
   7219 
   7220 	default:
   7221 		/* Don't support this protocol or encapsulation. */
   7222 		*fieldsp = 0;
   7223 		*cmdp = 0;
   7224 		return 0;
   7225 	}
   7226 
   7227 	if ((m0->m_pkthdr.csum_flags &
   7228 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7229 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
	}
   7232 
   7233 	ipcse = offset + iphl - 1;
   7234 
   7235 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7236 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7237 	seg = 0;
   7238 	fields = 0;
   7239 
   7240 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7241 		int hlen = offset + iphl;
   7242 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7243 
   7244 		if (__predict_false(m0->m_len <
   7245 				    (hlen + sizeof(struct tcphdr)))) {
   7246 			/*
   7247 			 * TCP/IP headers are not in the first mbuf; we need
   7248 			 * to do this the slow and painful way. Let's just
   7249 			 * hope this doesn't happen very often.
   7250 			 */
   7251 			struct tcphdr th;
   7252 
   7253 			WM_Q_EVCNT_INCR(txq, tsopain);
   7254 
   7255 			m_copydata(m0, hlen, sizeof(th), &th);
   7256 			if (v4) {
   7257 				struct ip ip;
   7258 
   7259 				m_copydata(m0, offset, sizeof(ip), &ip);
   7260 				ip.ip_len = 0;
   7261 				m_copyback(m0,
   7262 				    offset + offsetof(struct ip, ip_len),
   7263 				    sizeof(ip.ip_len), &ip.ip_len);
   7264 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7265 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7266 			} else {
   7267 				struct ip6_hdr ip6;
   7268 
   7269 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7270 				ip6.ip6_plen = 0;
   7271 				m_copyback(m0,
   7272 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7273 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7274 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7275 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7276 			}
   7277 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7278 			    sizeof(th.th_sum), &th.th_sum);
   7279 
   7280 			hlen += th.th_off << 2;
   7281 		} else {
   7282 			/*
   7283 			 * TCP/IP headers are in the first mbuf; we can do
   7284 			 * this the easy way.
   7285 			 */
   7286 			struct tcphdr *th;
   7287 
   7288 			if (v4) {
   7289 				struct ip *ip =
   7290 				    (void *)(mtod(m0, char *) + offset);
   7291 				th = (void *)(mtod(m0, char *) + hlen);
   7292 
   7293 				ip->ip_len = 0;
   7294 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7295 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7296 			} else {
   7297 				struct ip6_hdr *ip6 =
   7298 				    (void *)(mtod(m0, char *) + offset);
   7299 				th = (void *)(mtod(m0, char *) + hlen);
   7300 
   7301 				ip6->ip6_plen = 0;
   7302 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7303 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7304 			}
   7305 			hlen += th->th_off << 2;
   7306 		}
   7307 
   7308 		if (v4) {
   7309 			WM_Q_EVCNT_INCR(txq, tso);
   7310 			cmdlen |= WTX_TCPIP_CMD_IP;
   7311 		} else {
   7312 			WM_Q_EVCNT_INCR(txq, tso6);
   7313 			ipcse = 0;
   7314 		}
   7315 		cmd |= WTX_TCPIP_CMD_TSE;
   7316 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7317 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7318 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7319 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7320 	}
   7321 
   7322 	/*
   7323 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7324 	 * offload feature, if we load the context descriptor, we
   7325 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7326 	 */
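	/*
	 * Illustrative values: for an untagged IPv4 packet with a 20-byte
	 * IP header, IPCSS (checksum start) is 14, IPCSO (location of
	 * ip_sum) is 14 + 10 = 24, and IPCSE (last byte covered) is 33.
	 */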
   7327 
   7328 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7329 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7330 	    WTX_TCPIP_IPCSE(ipcse);
   7331 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7332 		WM_Q_EVCNT_INCR(txq, ipsum);
   7333 		fields |= WTX_IXSM;
   7334 	}
   7335 
   7336 	offset += iphl;
   7337 
   7338 	if (m0->m_pkthdr.csum_flags &
   7339 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7340 		WM_Q_EVCNT_INCR(txq, tusum);
   7341 		fields |= WTX_TXSM;
   7342 		tucs = WTX_TCPIP_TUCSS(offset) |
   7343 		    WTX_TCPIP_TUCSO(offset +
   7344 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7345 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7346 	} else if ((m0->m_pkthdr.csum_flags &
   7347 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7348 		WM_Q_EVCNT_INCR(txq, tusum6);
   7349 		fields |= WTX_TXSM;
   7350 		tucs = WTX_TCPIP_TUCSS(offset) |
   7351 		    WTX_TCPIP_TUCSO(offset +
   7352 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7353 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7354 	} else {
   7355 		/* Just initialize it to a valid TCP context. */
   7356 		tucs = WTX_TCPIP_TUCSS(offset) |
   7357 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7358 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7359 	}
   7360 
   7361 	/*
	 * We don't have to write the context descriptor for every packet,
	 * except on the 82574, which requires it for every packet when
	 * two descriptor queues are used. Writing it for every packet
	 * adds some overhead but does not cause problems.
   7367 	 */
   7368 	/* Fill in the context descriptor. */
   7369 	t = (struct livengood_tcpip_ctxdesc *)
   7370 	    &txq->txq_descs[txq->txq_next];
   7371 	t->tcpip_ipcs = htole32(ipcs);
   7372 	t->tcpip_tucs = htole32(tucs);
   7373 	t->tcpip_cmdlen = htole32(cmdlen);
   7374 	t->tcpip_seg = htole32(seg);
   7375 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7376 
   7377 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7378 	txs->txs_ndesc++;
   7379 
   7380 	*cmdp = cmd;
   7381 	*fieldsp = fields;
   7382 
   7383 	return 0;
   7384 }
   7385 
   7386 static inline int
   7387 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7388 {
   7389 	struct wm_softc *sc = ifp->if_softc;
   7390 	u_int cpuid = cpu_index(curcpu());
   7391 
   7392 	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
   7396 	 */
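	/*
	 * Illustrative mapping: with ncpu = 8, sc_affinity_offset = 2 and
	 * 4 queues, CPU 3 selects ((3 + 8 - 2) % 8) % 4 = 1.
	 */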
   7397 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7398 }
   7399 
   7400 /*
   7401  * wm_start:		[ifnet interface function]
   7402  *
   7403  *	Start packet transmission on the interface.
   7404  */
   7405 static void
   7406 wm_start(struct ifnet *ifp)
   7407 {
   7408 	struct wm_softc *sc = ifp->if_softc;
   7409 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7410 
   7411 #ifdef WM_MPSAFE
   7412 	KASSERT(if_is_mpsafe(ifp));
   7413 #endif
   7414 	/*
   7415 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7416 	 */
   7417 
   7418 	mutex_enter(txq->txq_lock);
   7419 	if (!txq->txq_stopping)
   7420 		wm_start_locked(ifp);
   7421 	mutex_exit(txq->txq_lock);
   7422 }
   7423 
   7424 static void
   7425 wm_start_locked(struct ifnet *ifp)
   7426 {
   7427 	struct wm_softc *sc = ifp->if_softc;
   7428 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7429 
   7430 	wm_send_common_locked(ifp, txq, false);
   7431 }
   7432 
   7433 static int
   7434 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7435 {
   7436 	int qid;
   7437 	struct wm_softc *sc = ifp->if_softc;
   7438 	struct wm_txqueue *txq;
   7439 
   7440 	qid = wm_select_txqueue(ifp, m);
   7441 	txq = &sc->sc_queue[qid].wmq_txq;
   7442 
   7443 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7444 		m_freem(m);
   7445 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7446 		return ENOBUFS;
   7447 	}
   7448 
   7449 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7450 	ifp->if_obytes += m->m_pkthdr.len;
   7451 	if (m->m_flags & M_MCAST)
   7452 		ifp->if_omcasts++;
   7453 
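	/*
	 * mutex_tryenter() keeps this path contention-free: the packet is
	 * already on txq_interq, so if the lock is held, the current
	 * holder (or a later Tx completion) should pick it up the next
	 * time wm_send_common_locked() runs.
	 */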
   7454 	if (mutex_tryenter(txq->txq_lock)) {
   7455 		if (!txq->txq_stopping)
   7456 			wm_transmit_locked(ifp, txq);
   7457 		mutex_exit(txq->txq_lock);
   7458 	}
   7459 
   7460 	return 0;
   7461 }
   7462 
   7463 static void
   7464 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7465 {
   7466 
   7467 	wm_send_common_locked(ifp, txq, true);
   7468 }
   7469 
   7470 static void
   7471 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7472     bool is_transmit)
   7473 {
   7474 	struct wm_softc *sc = ifp->if_softc;
   7475 	struct mbuf *m0;
   7476 	struct wm_txsoft *txs;
   7477 	bus_dmamap_t dmamap;
   7478 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7479 	bus_addr_t curaddr;
   7480 	bus_size_t seglen, curlen;
   7481 	uint32_t cksumcmd;
   7482 	uint8_t cksumfields;
   7483 	bool remap = true;
   7484 
   7485 	KASSERT(mutex_owned(txq->txq_lock));
   7486 
   7487 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7488 		return;
   7489 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7490 		return;
   7491 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7492 		return;
   7493 
   7494 	/* Remember the previous number of free descriptors. */
   7495 	ofree = txq->txq_free;
   7496 
   7497 	/*
   7498 	 * Loop through the send queue, setting up transmit descriptors
   7499 	 * until we drain the queue, or use up all available transmit
   7500 	 * descriptors.
   7501 	 */
   7502 	for (;;) {
   7503 		m0 = NULL;
   7504 
   7505 		/* Get a work queue entry. */
   7506 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7507 			wm_txeof(txq, UINT_MAX);
   7508 			if (txq->txq_sfree == 0) {
   7509 				DPRINTF(WM_DEBUG_TX,
   7510 				    ("%s: TX: no free job descriptors\n",
   7511 					device_xname(sc->sc_dev)));
   7512 				WM_Q_EVCNT_INCR(txq, txsstall);
   7513 				break;
   7514 			}
   7515 		}
   7516 
   7517 		/* Grab a packet off the queue. */
   7518 		if (is_transmit)
   7519 			m0 = pcq_get(txq->txq_interq);
   7520 		else
   7521 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7522 		if (m0 == NULL)
   7523 			break;
   7524 
   7525 		DPRINTF(WM_DEBUG_TX,
   7526 		    ("%s: TX: have packet to transmit: %p\n",
   7527 			device_xname(sc->sc_dev), m0));
   7528 
   7529 		txs = &txq->txq_soft[txq->txq_snext];
   7530 		dmamap = txs->txs_dmamap;
   7531 
   7532 		use_tso = (m0->m_pkthdr.csum_flags &
   7533 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7534 
   7535 		/*
   7536 		 * So says the Linux driver:
   7537 		 * The controller does a simple calculation to make sure
   7538 		 * there is enough room in the FIFO before initiating the
   7539 		 * DMA for each buffer. The calc is:
   7540 		 *	4 = ceil(buffer len / MSS)
   7541 		 * To make sure we don't overrun the FIFO, adjust the max
   7542 		 * buffer len if the MSS drops.
   7543 		 */
   7544 		dmamap->dm_maxsegsz =
   7545 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7546 		    ? m0->m_pkthdr.segsz << 2
   7547 		    : WTX_MAX_LEN;
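		/*
		 * E.g. (illustrative): a TSO packet with an MSS of 1448
		 * clamps each DMA segment to 4 * 1448 = 5792 bytes.
		 */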
   7548 
   7549 		/*
   7550 		 * Load the DMA map.  If this fails, the packet either
   7551 		 * didn't fit in the allotted number of segments, or we
   7552 		 * were short on resources.  For the too-many-segments
   7553 		 * case, we simply report an error and drop the packet,
   7554 		 * since we can't sanely copy a jumbo packet to a single
   7555 		 * buffer.
   7556 		 */
   7557 retry:
   7558 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7559 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7560 		if (__predict_false(error)) {
   7561 			if (error == EFBIG) {
   7562 				if (remap == true) {
   7563 					struct mbuf *m;
   7564 
   7565 					remap = false;
   7566 					m = m_defrag(m0, M_NOWAIT);
   7567 					if (m != NULL) {
   7568 						WM_Q_EVCNT_INCR(txq, defrag);
   7569 						m0 = m;
   7570 						goto retry;
   7571 					}
   7572 				}
   7573 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7574 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7575 				    "DMA segments, dropping...\n",
   7576 				    device_xname(sc->sc_dev));
   7577 				wm_dump_mbuf_chain(sc, m0);
   7578 				m_freem(m0);
   7579 				continue;
   7580 			}
   7581 			/* Short on resources, just stop for now. */
   7582 			DPRINTF(WM_DEBUG_TX,
   7583 			    ("%s: TX: dmamap load failed: %d\n",
   7584 				device_xname(sc->sc_dev), error));
   7585 			break;
   7586 		}
   7587 
   7588 		segs_needed = dmamap->dm_nsegs;
   7589 		if (use_tso) {
   7590 			/* For sentinel descriptor; see below. */
   7591 			segs_needed++;
   7592 		}
   7593 
   7594 		/*
   7595 		 * Ensure we have enough descriptors free to describe
   7596 		 * the packet. Note, we always reserve one descriptor
   7597 		 * at the end of the ring due to the semantics of the
   7598 		 * TDT register, plus one more in the event we need
   7599 		 * to load offload context.
   7600 		 */
   7601 		if (segs_needed > txq->txq_free - 2) {
   7602 			/*
   7603 			 * Not enough free descriptors to transmit this
   7604 			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map and punt (the packet
			 * is dropped below). Notify the upper layer that
			 * there are no more slots left.
   7608 			 */
   7609 			DPRINTF(WM_DEBUG_TX,
   7610 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7611 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7612 				segs_needed, txq->txq_free - 1));
   7613 			if (!is_transmit)
   7614 				ifp->if_flags |= IFF_OACTIVE;
   7615 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7616 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7617 			WM_Q_EVCNT_INCR(txq, txdstall);
   7618 			break;
   7619 		}
   7620 
   7621 		/*
   7622 		 * Check for 82547 Tx FIFO bug. We need to do this
   7623 		 * once we know we can transmit the packet, since we
   7624 		 * do some internal FIFO space accounting here.
   7625 		 */
   7626 		if (sc->sc_type == WM_T_82547 &&
   7627 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7628 			DPRINTF(WM_DEBUG_TX,
   7629 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7630 				device_xname(sc->sc_dev)));
   7631 			if (!is_transmit)
   7632 				ifp->if_flags |= IFF_OACTIVE;
   7633 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7634 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7635 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7636 			break;
   7637 		}
   7638 
   7639 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7640 
   7641 		DPRINTF(WM_DEBUG_TX,
   7642 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7643 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7644 
   7645 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7646 
   7647 		/*
   7648 		 * Store a pointer to the packet so that we can free it
   7649 		 * later.
   7650 		 *
   7651 		 * Initially, we consider the number of descriptors the
   7652 		 * packet uses the number of DMA segments.  This may be
   7653 		 * incremented by 1 if we do checksum offload (a descriptor
   7654 		 * is used to set the checksum context).
   7655 		 */
   7656 		txs->txs_mbuf = m0;
   7657 		txs->txs_firstdesc = txq->txq_next;
   7658 		txs->txs_ndesc = segs_needed;
   7659 
   7660 		/* Set up offload parameters for this packet. */
   7661 		if (m0->m_pkthdr.csum_flags &
   7662 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7663 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7664 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7665 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7666 					  &cksumfields) != 0) {
   7667 				/* Error message already displayed. */
   7668 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7669 				continue;
   7670 			}
   7671 		} else {
   7672 			cksumcmd = 0;
   7673 			cksumfields = 0;
   7674 		}
   7675 
   7676 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7677 
   7678 		/* Sync the DMA map. */
   7679 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7680 		    BUS_DMASYNC_PREWRITE);
   7681 
   7682 		/* Initialize the transmit descriptor. */
   7683 		for (nexttx = txq->txq_next, seg = 0;
   7684 		     seg < dmamap->dm_nsegs; seg++) {
   7685 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7686 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7687 			     seglen != 0;
   7688 			     curaddr += curlen, seglen -= curlen,
   7689 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7690 				curlen = seglen;
   7691 
   7692 				/*
   7693 				 * So says the Linux driver:
   7694 				 * Work around for premature descriptor
   7695 				 * write-backs in TSO mode.  Append a
   7696 				 * 4-byte sentinel descriptor.
   7697 				 */
   7698 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7699 				    curlen > 8)
   7700 					curlen -= 4;
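				/*
				 * The 4 bytes shaved off here come back
				 * as a separate 4-byte descriptor on the
				 * next loop iteration: the sentinel
				 * accounted for in segs_needed above.
				 */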
   7701 
   7702 				wm_set_dma_addr(
   7703 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7704 				txq->txq_descs[nexttx].wtx_cmdlen
   7705 				    = htole32(cksumcmd | curlen);
   7706 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7707 				    = 0;
   7708 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7709 				    = cksumfields;
   7710 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7711 				lasttx = nexttx;
   7712 
   7713 				DPRINTF(WM_DEBUG_TX,
   7714 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7715 					"len %#04zx\n",
   7716 					device_xname(sc->sc_dev), nexttx,
   7717 					(uint64_t)curaddr, curlen));
   7718 			}
   7719 		}
   7720 
   7721 		KASSERT(lasttx != -1);
   7722 
   7723 		/*
   7724 		 * Set up the command byte on the last descriptor of
   7725 		 * the packet. If we're in the interrupt delay window,
   7726 		 * delay the interrupt.
   7727 		 */
   7728 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7729 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7730 
   7731 		/*
   7732 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7733 		 * up the descriptor to encapsulate the packet for us.
   7734 		 *
   7735 		 * This is only valid on the last descriptor of the packet.
   7736 		 */
   7737 		if (vlan_has_tag(m0)) {
   7738 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7739 			    htole32(WTX_CMD_VLE);
   7740 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7741 			    = htole16(vlan_get_tag(m0));
   7742 		}
   7743 
   7744 		txs->txs_lastdesc = lasttx;
   7745 
   7746 		DPRINTF(WM_DEBUG_TX,
   7747 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7748 			device_xname(sc->sc_dev),
   7749 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7750 
   7751 		/* Sync the descriptors we're using. */
   7752 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7753 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7754 
   7755 		/* Give the packet to the chip. */
   7756 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7757 
   7758 		DPRINTF(WM_DEBUG_TX,
   7759 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7760 
   7761 		DPRINTF(WM_DEBUG_TX,
   7762 		    ("%s: TX: finished transmitting packet, job %d\n",
   7763 			device_xname(sc->sc_dev), txq->txq_snext));
   7764 
   7765 		/* Advance the tx pointer. */
   7766 		txq->txq_free -= txs->txs_ndesc;
   7767 		txq->txq_next = nexttx;
   7768 
   7769 		txq->txq_sfree--;
   7770 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7771 
   7772 		/* Pass the packet to any BPF listeners. */
   7773 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7774 	}
   7775 
   7776 	if (m0 != NULL) {
   7777 		if (!is_transmit)
   7778 			ifp->if_flags |= IFF_OACTIVE;
   7779 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7780 		WM_Q_EVCNT_INCR(txq, descdrop);
   7781 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7782 			__func__));
   7783 		m_freem(m0);
   7784 	}
   7785 
   7786 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7787 		/* No more slots; notify upper layer. */
   7788 		if (!is_transmit)
   7789 			ifp->if_flags |= IFF_OACTIVE;
   7790 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7791 	}
   7792 
   7793 	if (txq->txq_free != ofree) {
   7794 		/* Set a watchdog timer in case the chip flakes out. */
   7795 		txq->txq_lastsent = time_uptime;
   7796 		txq->txq_sending = true;
   7797 	}
   7798 }
   7799 
   7800 /*
   7801  * wm_nq_tx_offload:
   7802  *
   7803  *	Set up TCP/IP checksumming parameters for the
   7804  *	specified packet, for NEWQUEUE devices
   7805  */
   7806 static int
   7807 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7808     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7809 {
   7810 	struct mbuf *m0 = txs->txs_mbuf;
   7811 	uint32_t vl_len, mssidx, cmdc;
   7812 	struct ether_header *eh;
   7813 	int offset, iphl;
   7814 
   7815 	/*
   7816 	 * XXX It would be nice if the mbuf pkthdr had offset
   7817 	 * fields for the protocol headers.
   7818 	 */
   7819 	*cmdlenp = 0;
   7820 	*fieldsp = 0;
   7821 
   7822 	eh = mtod(m0, struct ether_header *);
   7823 	switch (htons(eh->ether_type)) {
   7824 	case ETHERTYPE_IP:
   7825 	case ETHERTYPE_IPV6:
   7826 		offset = ETHER_HDR_LEN;
   7827 		break;
   7828 
   7829 	case ETHERTYPE_VLAN:
   7830 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7831 		break;
   7832 
   7833 	default:
   7834 		/* Don't support this protocol or encapsulation. */
   7835 		*do_csum = false;
   7836 		return 0;
   7837 	}
   7838 	*do_csum = true;
   7839 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7840 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7841 
   7842 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7843 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7844 
   7845 	if ((m0->m_pkthdr.csum_flags &
   7846 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7847 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7848 	} else {
   7849 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7850 	}
   7851 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7852 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7853 
   7854 	if (vlan_has_tag(m0)) {
   7855 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7856 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7857 		*cmdlenp |= NQTX_CMD_VLE;
   7858 	}
   7859 
   7860 	mssidx = 0;
   7861 
   7862 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7863 		int hlen = offset + iphl;
   7864 		int tcp_hlen;
   7865 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7866 
   7867 		if (__predict_false(m0->m_len <
   7868 				    (hlen + sizeof(struct tcphdr)))) {
   7869 			/*
   7870 			 * TCP/IP headers are not in the first mbuf; we need
   7871 			 * to do this the slow and painful way. Let's just
   7872 			 * hope this doesn't happen very often.
   7873 			 */
   7874 			struct tcphdr th;
   7875 
   7876 			WM_Q_EVCNT_INCR(txq, tsopain);
   7877 
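         			/*
         			 * For TSO, seed th_sum with the pseudo-header
         			 * checksum computed over a zero length; the chip
         			 * fills in the real length for each segment.
         			 */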
   7878 			m_copydata(m0, hlen, sizeof(th), &th);
   7879 			if (v4) {
   7880 				struct ip ip;
   7881 
   7882 				m_copydata(m0, offset, sizeof(ip), &ip);
   7883 				ip.ip_len = 0;
   7884 				m_copyback(m0,
   7885 				    offset + offsetof(struct ip, ip_len),
   7886 				    sizeof(ip.ip_len), &ip.ip_len);
   7887 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7888 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7889 			} else {
   7890 				struct ip6_hdr ip6;
   7891 
   7892 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7893 				ip6.ip6_plen = 0;
   7894 				m_copyback(m0,
   7895 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7896 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7897 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7898 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7899 			}
   7900 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7901 			    sizeof(th.th_sum), &th.th_sum);
   7902 
   7903 			tcp_hlen = th.th_off << 2;
   7904 		} else {
   7905 			/*
   7906 			 * TCP/IP headers are in the first mbuf; we can do
   7907 			 * this the easy way.
   7908 			 */
   7909 			struct tcphdr *th;
   7910 
   7911 			if (v4) {
   7912 				struct ip *ip =
   7913 				    (void *)(mtod(m0, char *) + offset);
   7914 				th = (void *)(mtod(m0, char *) + hlen);
   7915 
   7916 				ip->ip_len = 0;
   7917 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7918 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7919 			} else {
   7920 				struct ip6_hdr *ip6 =
   7921 				    (void *)(mtod(m0, char *) + offset);
   7922 				th = (void *)(mtod(m0, char *) + hlen);
   7923 
   7924 				ip6->ip6_plen = 0;
   7925 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7926 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7927 			}
   7928 			tcp_hlen = th->th_off << 2;
   7929 		}
   7930 		hlen += tcp_hlen;
   7931 		*cmdlenp |= NQTX_CMD_TSE;
   7932 
   7933 		if (v4) {
   7934 			WM_Q_EVCNT_INCR(txq, tso);
   7935 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7936 		} else {
   7937 			WM_Q_EVCNT_INCR(txq, tso6);
   7938 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7939 		}
   7940 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7941 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7942 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7943 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7944 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7945 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7946 	} else {
   7947 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7948 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7949 	}
   7950 
   7951 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7952 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7953 		cmdc |= NQTXC_CMD_IP4;
   7954 	}
   7955 
   7956 	if (m0->m_pkthdr.csum_flags &
   7957 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7958 		WM_Q_EVCNT_INCR(txq, tusum);
   7959 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7960 			cmdc |= NQTXC_CMD_TCP;
   7961 		else
   7962 			cmdc |= NQTXC_CMD_UDP;
   7963 
   7964 		cmdc |= NQTXC_CMD_IP4;
   7965 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7966 	}
   7967 	if (m0->m_pkthdr.csum_flags &
   7968 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7969 		WM_Q_EVCNT_INCR(txq, tusum6);
   7970 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7971 			cmdc |= NQTXC_CMD_TCP;
   7972 		else
   7973 			cmdc |= NQTXC_CMD_UDP;
   7974 
   7975 		cmdc |= NQTXC_CMD_IP6;
   7976 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7977 	}
   7978 
   7979 	/*
    7980 	 * We don't have to write a context descriptor for every packet
    7981 	 * on NEWQUEUE controllers, that is, 82575, 82576, 82580, I350,
    7982 	 * I354, I210 and I211. Writing one per Tx queue is enough for
    7983 	 * these controllers.
    7984 	 * Writing a context descriptor for every packet adds overhead,
    7985 	 * but it does not cause problems.
   7986 	 */
   7987 	/* Fill in the context descriptor. */
   7988 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7989 	    htole32(vl_len);
   7990 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7991 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7992 	    htole32(cmdc);
   7993 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7994 	    htole32(mssidx);
   7995 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7996 	DPRINTF(WM_DEBUG_TX,
   7997 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7998 		txq->txq_next, 0, vl_len));
   7999 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8000 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8001 	txs->txs_ndesc++;
   8002 	return 0;
   8003 }
   8004 
   8005 /*
   8006  * wm_nq_start:		[ifnet interface function]
   8007  *
   8008  *	Start packet transmission on the interface for NEWQUEUE devices
   8009  */
   8010 static void
   8011 wm_nq_start(struct ifnet *ifp)
   8012 {
   8013 	struct wm_softc *sc = ifp->if_softc;
   8014 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8015 
   8016 #ifdef WM_MPSAFE
   8017 	KASSERT(if_is_mpsafe(ifp));
   8018 #endif
   8019 	/*
   8020 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8021 	 */
   8022 
   8023 	mutex_enter(txq->txq_lock);
   8024 	if (!txq->txq_stopping)
   8025 		wm_nq_start_locked(ifp);
   8026 	mutex_exit(txq->txq_lock);
   8027 }
   8028 
   8029 static void
   8030 wm_nq_start_locked(struct ifnet *ifp)
   8031 {
   8032 	struct wm_softc *sc = ifp->if_softc;
   8033 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8034 
   8035 	wm_nq_send_common_locked(ifp, txq, false);
   8036 }
   8037 
   8038 static int
   8039 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8040 {
   8041 	int qid;
   8042 	struct wm_softc *sc = ifp->if_softc;
   8043 	struct wm_txqueue *txq;
   8044 
   8045 	qid = wm_select_txqueue(ifp, m);
   8046 	txq = &sc->sc_queue[qid].wmq_txq;
   8047 
   8048 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8049 		m_freem(m);
   8050 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8051 		return ENOBUFS;
   8052 	}
   8053 
   8054 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8055 	ifp->if_obytes += m->m_pkthdr.len;
   8056 	if (m->m_flags & M_MCAST)
   8057 		ifp->if_omcasts++;
   8058 
   8059 	/*
    8060 	 * There are two situations in which this mutex_tryenter() can
    8061 	 * fail at run time:
    8062 	 *     (1) contention with the interrupt handler
    8063 	 *         (wm_txrxintr_msix())
    8064 	 *     (2) contention with the deferred if_start softint
    8065 	 *         (wm_handle_queue())
    8066 	 * In either case, the last packet enqueued on txq->txq_interq
    8067 	 * is dequeued later by wm_deferred_start_locked(), so it does
    8068 	 * not get stuck.
   8069 	 */
   8070 	if (mutex_tryenter(txq->txq_lock)) {
   8071 		if (!txq->txq_stopping)
   8072 			wm_nq_transmit_locked(ifp, txq);
   8073 		mutex_exit(txq->txq_lock);
   8074 	}
   8075 
   8076 	return 0;
   8077 }
   8078 
   8079 static void
   8080 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8081 {
   8082 
   8083 	wm_nq_send_common_locked(ifp, txq, true);
   8084 }
   8085 
   8086 static void
   8087 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8088     bool is_transmit)
   8089 {
   8090 	struct wm_softc *sc = ifp->if_softc;
   8091 	struct mbuf *m0;
   8092 	struct wm_txsoft *txs;
   8093 	bus_dmamap_t dmamap;
   8094 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8095 	bool do_csum, sent;
   8096 	bool remap = true;
   8097 
   8098 	KASSERT(mutex_owned(txq->txq_lock));
   8099 
   8100 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8101 		return;
   8102 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8103 		return;
   8104 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8105 		return;
   8106 
   8107 	sent = false;
   8108 
   8109 	/*
   8110 	 * Loop through the send queue, setting up transmit descriptors
   8111 	 * until we drain the queue, or use up all available transmit
   8112 	 * descriptors.
   8113 	 */
   8114 	for (;;) {
   8115 		m0 = NULL;
   8116 
   8117 		/* Get a work queue entry. */
   8118 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8119 			wm_txeof(txq, UINT_MAX);
   8120 			if (txq->txq_sfree == 0) {
   8121 				DPRINTF(WM_DEBUG_TX,
   8122 				    ("%s: TX: no free job descriptors\n",
   8123 					device_xname(sc->sc_dev)));
   8124 				WM_Q_EVCNT_INCR(txq, txsstall);
   8125 				break;
   8126 			}
   8127 		}
   8128 
   8129 		/* Grab a packet off the queue. */
   8130 		if (is_transmit)
   8131 			m0 = pcq_get(txq->txq_interq);
   8132 		else
   8133 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8134 		if (m0 == NULL)
   8135 			break;
   8136 
   8137 		DPRINTF(WM_DEBUG_TX,
   8138 		    ("%s: TX: have packet to transmit: %p\n",
   8139 		    device_xname(sc->sc_dev), m0));
   8140 
   8141 		txs = &txq->txq_soft[txq->txq_snext];
   8142 		dmamap = txs->txs_dmamap;
   8143 
   8144 		/*
   8145 		 * Load the DMA map.  If this fails, the packet either
   8146 		 * didn't fit in the allotted number of segments, or we
   8147 		 * were short on resources.  For the too-many-segments
   8148 		 * case, we simply report an error and drop the packet,
   8149 		 * since we can't sanely copy a jumbo packet to a single
   8150 		 * buffer.
   8151 		 */
   8152 retry:
   8153 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8154 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8155 		if (__predict_false(error)) {
   8156 			if (error == EFBIG) {
   8157 				if (remap == true) {
   8158 					struct mbuf *m;
   8159 
   8160 					remap = false;
   8161 					m = m_defrag(m0, M_NOWAIT);
   8162 					if (m != NULL) {
   8163 						WM_Q_EVCNT_INCR(txq, defrag);
   8164 						m0 = m;
   8165 						goto retry;
   8166 					}
   8167 				}
   8168 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8169 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8170 				    "DMA segments, dropping...\n",
   8171 				    device_xname(sc->sc_dev));
   8172 				wm_dump_mbuf_chain(sc, m0);
   8173 				m_freem(m0);
   8174 				continue;
   8175 			}
   8176 			/* Short on resources, just stop for now. */
   8177 			DPRINTF(WM_DEBUG_TX,
   8178 			    ("%s: TX: dmamap load failed: %d\n",
   8179 				device_xname(sc->sc_dev), error));
   8180 			break;
   8181 		}
   8182 
   8183 		segs_needed = dmamap->dm_nsegs;
   8184 
   8185 		/*
   8186 		 * Ensure we have enough descriptors free to describe
   8187 		 * the packet. Note, we always reserve one descriptor
   8188 		 * at the end of the ring due to the semantics of the
   8189 		 * TDT register, plus one more in the event we need
   8190 		 * to load offload context.
   8191 		 */
   8192 		if (segs_needed > txq->txq_free - 2) {
   8193 			/*
   8194 			 * Not enough free descriptors to transmit this
   8195 			 * packet.  We haven't committed anything yet,
   8196 			 * so just unload the DMA map, put the packet
    8197 			 * back on the queue, and punt. Notify the upper
   8198 			 * layer that there are no more slots left.
   8199 			 */
   8200 			DPRINTF(WM_DEBUG_TX,
   8201 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8202 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8203 				segs_needed, txq->txq_free - 1));
   8204 			if (!is_transmit)
   8205 				ifp->if_flags |= IFF_OACTIVE;
   8206 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8207 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8208 			WM_Q_EVCNT_INCR(txq, txdstall);
   8209 			break;
   8210 		}
   8211 
   8212 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8213 
   8214 		DPRINTF(WM_DEBUG_TX,
   8215 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8216 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8217 
   8218 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8219 
   8220 		/*
   8221 		 * Store a pointer to the packet so that we can free it
   8222 		 * later.
   8223 		 *
    8224 		 * Initially, we consider the number of descriptors the
    8225 		 * packet uses to be the number of DMA segments.  This may be
   8226 		 * incremented by 1 if we do checksum offload (a descriptor
   8227 		 * is used to set the checksum context).
   8228 		 */
   8229 		txs->txs_mbuf = m0;
   8230 		txs->txs_firstdesc = txq->txq_next;
   8231 		txs->txs_ndesc = segs_needed;
   8232 
   8233 		/* Set up offload parameters for this packet. */
   8234 		uint32_t cmdlen, fields, dcmdlen;
   8235 		if (m0->m_pkthdr.csum_flags &
   8236 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8237 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8238 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8239 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8240 			    &do_csum) != 0) {
   8241 				/* Error message already displayed. */
   8242 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8243 				continue;
   8244 			}
   8245 		} else {
   8246 			do_csum = false;
   8247 			cmdlen = 0;
   8248 			fields = 0;
   8249 		}
   8250 
   8251 		/* Sync the DMA map. */
   8252 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8253 		    BUS_DMASYNC_PREWRITE);
   8254 
   8255 		/* Initialize the first transmit descriptor. */
   8256 		nexttx = txq->txq_next;
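         		/*
         		 * A packet needing no offload can use a legacy
         		 * descriptor; otherwise use an advanced data descriptor,
         		 * which pairs with the context descriptor queued by
         		 * wm_nq_tx_offload().
         		 */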
   8257 		if (!do_csum) {
   8258 			/* Setup a legacy descriptor */
   8259 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8260 			    dmamap->dm_segs[0].ds_addr);
   8261 			txq->txq_descs[nexttx].wtx_cmdlen =
   8262 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8263 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8264 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8265 			if (vlan_has_tag(m0)) {
   8266 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8267 				    htole32(WTX_CMD_VLE);
   8268 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8269 				    htole16(vlan_get_tag(m0));
   8270 			} else
    8271 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8272 
   8273 			dcmdlen = 0;
   8274 		} else {
   8275 			/* Setup an advanced data descriptor */
   8276 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8277 			    htole64(dmamap->dm_segs[0].ds_addr);
   8278 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8279 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8280 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8281 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8282 			    htole32(fields);
   8283 			DPRINTF(WM_DEBUG_TX,
   8284 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8285 				device_xname(sc->sc_dev), nexttx,
   8286 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8287 			DPRINTF(WM_DEBUG_TX,
   8288 			    ("\t 0x%08x%08x\n", fields,
   8289 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8290 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8291 		}
   8292 
   8293 		lasttx = nexttx;
   8294 		nexttx = WM_NEXTTX(txq, nexttx);
   8295 		/*
    8296 		 * Fill in the next descriptors. The legacy and advanced
    8297 		 * formats are the same here.
   8298 		 */
   8299 		for (seg = 1; seg < dmamap->dm_nsegs;
   8300 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8301 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8302 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8303 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8304 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8305 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8306 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8307 			lasttx = nexttx;
   8308 
   8309 			DPRINTF(WM_DEBUG_TX,
   8310 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8311 				device_xname(sc->sc_dev), nexttx,
   8312 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8313 				dmamap->dm_segs[seg].ds_len));
   8314 		}
   8315 
   8316 		KASSERT(lasttx != -1);
   8317 
   8318 		/*
   8319 		 * Set up the command byte on the last descriptor of
   8320 		 * the packet. If we're in the interrupt delay window,
   8321 		 * delay the interrupt.
   8322 		 */
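         		/*
         		 * The EOP/RS command bits occupy the same positions in
         		 * the legacy and advanced formats, as the KASSERT below
         		 * verifies.
         		 */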
   8323 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8324 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8325 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8326 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8327 
   8328 		txs->txs_lastdesc = lasttx;
   8329 
   8330 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8331 		    device_xname(sc->sc_dev),
   8332 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8333 
   8334 		/* Sync the descriptors we're using. */
   8335 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8336 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8337 
   8338 		/* Give the packet to the chip. */
   8339 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8340 		sent = true;
   8341 
   8342 		DPRINTF(WM_DEBUG_TX,
   8343 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8344 
   8345 		DPRINTF(WM_DEBUG_TX,
   8346 		    ("%s: TX: finished transmitting packet, job %d\n",
   8347 			device_xname(sc->sc_dev), txq->txq_snext));
   8348 
   8349 		/* Advance the tx pointer. */
   8350 		txq->txq_free -= txs->txs_ndesc;
   8351 		txq->txq_next = nexttx;
   8352 
   8353 		txq->txq_sfree--;
   8354 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8355 
   8356 		/* Pass the packet to any BPF listeners. */
   8357 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8358 	}
   8359 
   8360 	if (m0 != NULL) {
   8361 		if (!is_transmit)
   8362 			ifp->if_flags |= IFF_OACTIVE;
   8363 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8364 		WM_Q_EVCNT_INCR(txq, descdrop);
   8365 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8366 			__func__));
   8367 		m_freem(m0);
   8368 	}
   8369 
   8370 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8371 		/* No more slots; notify upper layer. */
   8372 		if (!is_transmit)
   8373 			ifp->if_flags |= IFF_OACTIVE;
   8374 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8375 	}
   8376 
   8377 	if (sent) {
   8378 		/* Set a watchdog timer in case the chip flakes out. */
   8379 		txq->txq_lastsent = time_uptime;
   8380 		txq->txq_sending = true;
   8381 	}
   8382 }
   8383 
   8384 static void
   8385 wm_deferred_start_locked(struct wm_txqueue *txq)
   8386 {
   8387 	struct wm_softc *sc = txq->txq_sc;
   8388 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8389 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8390 	int qid = wmq->wmq_id;
   8391 
   8392 	KASSERT(mutex_owned(txq->txq_lock));
   8393 
   8394 	if (txq->txq_stopping) {
   8395 		mutex_exit(txq->txq_lock);
   8396 		return;
   8397 	}
   8398 
   8399 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8400 		/* XXX needed for ALTQ or a single-CPU system */
   8401 		if (qid == 0)
   8402 			wm_nq_start_locked(ifp);
   8403 		wm_nq_transmit_locked(ifp, txq);
   8404 	} else {
    8405 		/* XXX needed for ALTQ or a single-CPU system */
   8406 		if (qid == 0)
   8407 			wm_start_locked(ifp);
   8408 		wm_transmit_locked(ifp, txq);
   8409 	}
   8410 }
   8411 
   8412 /* Interrupt */
   8413 
   8414 /*
   8415  * wm_txeof:
   8416  *
   8417  *	Helper; handle transmit interrupts.
   8418  */
   8419 static bool
   8420 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8421 {
   8422 	struct wm_softc *sc = txq->txq_sc;
   8423 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8424 	struct wm_txsoft *txs;
   8425 	int count = 0;
   8426 	int i;
   8427 	uint8_t status;
   8428 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8429 	bool more = false;
   8430 
   8431 	KASSERT(mutex_owned(txq->txq_lock));
   8432 
   8433 	if (txq->txq_stopping)
   8434 		return false;
   8435 
   8436 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8437 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8438 	if (wmq->wmq_id == 0)
   8439 		ifp->if_flags &= ~IFF_OACTIVE;
   8440 
   8441 	/*
   8442 	 * Go through the Tx list and free mbufs for those
   8443 	 * frames which have been transmitted.
   8444 	 */
   8445 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8446 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8447 		if (limit-- == 0) {
   8448 			more = true;
   8449 			DPRINTF(WM_DEBUG_TX,
   8450 			    ("%s: TX: loop limited, job %d is not processed\n",
   8451 				device_xname(sc->sc_dev), i));
   8452 			break;
   8453 		}
   8454 
   8455 		txs = &txq->txq_soft[i];
   8456 
   8457 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8458 			device_xname(sc->sc_dev), i));
   8459 
   8460 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8461 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8462 
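         		/*
         		 * With RS set above, the chip writes back the DD
         		 * (descriptor done) bit once the job's last descriptor
         		 * has been processed.
         		 */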
   8463 		status =
   8464 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8465 		if ((status & WTX_ST_DD) == 0) {
   8466 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8467 			    BUS_DMASYNC_PREREAD);
   8468 			break;
   8469 		}
   8470 
   8471 		count++;
   8472 		DPRINTF(WM_DEBUG_TX,
   8473 		    ("%s: TX: job %d done: descs %d..%d\n",
   8474 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8475 		    txs->txs_lastdesc));
   8476 
   8477 		/*
   8478 		 * XXX We should probably be using the statistics
   8479 		 * XXX registers, but I don't know if they exist
   8480 		 * XXX on chips before the i82544.
   8481 		 */
   8482 
   8483 #ifdef WM_EVENT_COUNTERS
   8484 		if (status & WTX_ST_TU)
   8485 			WM_Q_EVCNT_INCR(txq, underrun);
   8486 #endif /* WM_EVENT_COUNTERS */
   8487 
   8488 		/*
    8489 		 * Documentation for the 82574 and newer says the status
    8490 		 * field has neither an EC (Excessive Collision) bit nor an
    8491 		 * LC (Late Collision) bit; those bits are reserved. Refer to
    8492 		 * the "PCIe GbE Controller Open Source Software Developer's
    8493 		 * Manual", the 82574 datasheet, and newer.
    8494 		 *
    8495 		 * XXX I saw the LC bit set on an I218 even though the media
    8496 		 * was full duplex, so the bit might have some other meaning.
   8497 		 */
   8498 
   8499 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8500 		    && ((sc->sc_type < WM_T_82574)
   8501 			|| (sc->sc_type == WM_T_80003))) {
   8502 			ifp->if_oerrors++;
   8503 			if (status & WTX_ST_LC)
   8504 				log(LOG_WARNING, "%s: late collision\n",
   8505 				    device_xname(sc->sc_dev));
   8506 			else if (status & WTX_ST_EC) {
   8507 				ifp->if_collisions +=
   8508 				    TX_COLLISION_THRESHOLD + 1;
   8509 				log(LOG_WARNING, "%s: excessive collisions\n",
   8510 				    device_xname(sc->sc_dev));
   8511 			}
   8512 		} else
   8513 			ifp->if_opackets++;
   8514 
   8515 		txq->txq_packets++;
   8516 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8517 
   8518 		txq->txq_free += txs->txs_ndesc;
   8519 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8520 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8521 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8522 		m_freem(txs->txs_mbuf);
   8523 		txs->txs_mbuf = NULL;
   8524 	}
   8525 
   8526 	/* Update the dirty transmit buffer pointer. */
   8527 	txq->txq_sdirty = i;
   8528 	DPRINTF(WM_DEBUG_TX,
   8529 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8530 
   8531 	if (count != 0)
   8532 		rnd_add_uint32(&sc->rnd_source, count);
   8533 
   8534 	/*
   8535 	 * If there are no more pending transmissions, cancel the watchdog
   8536 	 * timer.
   8537 	 */
   8538 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8539 		txq->txq_sending = false;
   8540 
   8541 	return more;
   8542 }
   8543 
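         /*
          * RX descriptor accessors: the 82574 uses extended descriptors,
          * NEWQUEUE (82575 and newer) devices use advanced descriptors,
          * and older devices use the legacy format. These helpers hide
          * the difference.
          */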
   8544 static inline uint32_t
   8545 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8546 {
   8547 	struct wm_softc *sc = rxq->rxq_sc;
   8548 
   8549 	if (sc->sc_type == WM_T_82574)
   8550 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8551 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8552 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8553 	else
   8554 		return rxq->rxq_descs[idx].wrx_status;
   8555 }
   8556 
   8557 static inline uint32_t
   8558 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8559 {
   8560 	struct wm_softc *sc = rxq->rxq_sc;
   8561 
   8562 	if (sc->sc_type == WM_T_82574)
   8563 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8564 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8565 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8566 	else
   8567 		return rxq->rxq_descs[idx].wrx_errors;
   8568 }
   8569 
   8570 static inline uint16_t
   8571 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8572 {
   8573 	struct wm_softc *sc = rxq->rxq_sc;
   8574 
   8575 	if (sc->sc_type == WM_T_82574)
   8576 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8577 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8578 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8579 	else
   8580 		return rxq->rxq_descs[idx].wrx_special;
   8581 }
   8582 
   8583 static inline int
   8584 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8585 {
   8586 	struct wm_softc *sc = rxq->rxq_sc;
   8587 
   8588 	if (sc->sc_type == WM_T_82574)
   8589 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8590 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8591 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8592 	else
   8593 		return rxq->rxq_descs[idx].wrx_len;
   8594 }
   8595 
   8596 #ifdef WM_DEBUG
   8597 static inline uint32_t
   8598 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8599 {
   8600 	struct wm_softc *sc = rxq->rxq_sc;
   8601 
   8602 	if (sc->sc_type == WM_T_82574)
   8603 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8604 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8605 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8606 	else
   8607 		return 0;
   8608 }
   8609 
   8610 static inline uint8_t
   8611 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8612 {
   8613 	struct wm_softc *sc = rxq->rxq_sc;
   8614 
   8615 	if (sc->sc_type == WM_T_82574)
   8616 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8617 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8618 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8619 	else
   8620 		return 0;
   8621 }
   8622 #endif /* WM_DEBUG */
   8623 
   8624 static inline bool
   8625 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8626     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8627 {
   8628 
   8629 	if (sc->sc_type == WM_T_82574)
   8630 		return (status & ext_bit) != 0;
   8631 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8632 		return (status & nq_bit) != 0;
   8633 	else
   8634 		return (status & legacy_bit) != 0;
   8635 }
   8636 
   8637 static inline bool
   8638 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8639     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8640 {
   8641 
   8642 	if (sc->sc_type == WM_T_82574)
   8643 		return (error & ext_bit) != 0;
   8644 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8645 		return (error & nq_bit) != 0;
   8646 	else
   8647 		return (error & legacy_bit) != 0;
   8648 }
   8649 
   8650 static inline bool
   8651 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8652 {
   8653 
   8654 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8655 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8656 		return true;
   8657 	else
   8658 		return false;
   8659 }
   8660 
   8661 static inline bool
   8662 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8663 {
   8664 	struct wm_softc *sc = rxq->rxq_sc;
   8665 
   8666 	/* XXX missing error bit for newqueue? */
   8667 	if (wm_rxdesc_is_set_error(sc, errors,
   8668 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8669 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8670 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8671 		NQRXC_ERROR_RXE)) {
   8672 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8673 		    EXTRXC_ERROR_SE, 0))
   8674 			log(LOG_WARNING, "%s: symbol error\n",
   8675 			    device_xname(sc->sc_dev));
   8676 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8677 		    EXTRXC_ERROR_SEQ, 0))
   8678 			log(LOG_WARNING, "%s: receive sequence error\n",
   8679 			    device_xname(sc->sc_dev));
   8680 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8681 		    EXTRXC_ERROR_CE, 0))
   8682 			log(LOG_WARNING, "%s: CRC error\n",
   8683 			    device_xname(sc->sc_dev));
   8684 		return true;
   8685 	}
   8686 
   8687 	return false;
   8688 }
   8689 
   8690 static inline bool
   8691 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8692 {
   8693 	struct wm_softc *sc = rxq->rxq_sc;
   8694 
   8695 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8696 		NQRXC_STATUS_DD)) {
   8697 		/* We have processed all of the receive descriptors. */
   8698 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8699 		return false;
   8700 	}
   8701 
   8702 	return true;
   8703 }
   8704 
   8705 static inline bool
   8706 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8707     uint16_t vlantag, struct mbuf *m)
   8708 {
   8709 
   8710 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8711 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8712 		vlan_set_tag(m, le16toh(vlantag));
   8713 	}
   8714 
   8715 	return true;
   8716 }
   8717 
   8718 static inline void
   8719 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8720     uint32_t errors, struct mbuf *m)
   8721 {
   8722 	struct wm_softc *sc = rxq->rxq_sc;
   8723 
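         	/*
         	 * If IXSM is set, the chip did not check this packet's
         	 * checksums; leave csum_flags clear so the stack verifies
         	 * them in software.
         	 */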
   8724 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8725 		if (wm_rxdesc_is_set_status(sc, status,
   8726 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8727 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8728 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8729 			if (wm_rxdesc_is_set_error(sc, errors,
   8730 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8731 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8732 		}
   8733 		if (wm_rxdesc_is_set_status(sc, status,
   8734 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8735 			/*
   8736 			 * Note: we don't know if this was TCP or UDP,
   8737 			 * so we just set both bits, and expect the
   8738 			 * upper layers to deal.
   8739 			 */
   8740 			WM_Q_EVCNT_INCR(rxq, tusum);
   8741 			m->m_pkthdr.csum_flags |=
   8742 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8743 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8744 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8745 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8746 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8747 		}
   8748 	}
   8749 }
   8750 
   8751 /*
   8752  * wm_rxeof:
   8753  *
   8754  *	Helper; handle receive interrupts.
   8755  */
   8756 static bool
   8757 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8758 {
   8759 	struct wm_softc *sc = rxq->rxq_sc;
   8760 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8761 	struct wm_rxsoft *rxs;
   8762 	struct mbuf *m;
   8763 	int i, len;
   8764 	int count = 0;
   8765 	uint32_t status, errors;
   8766 	uint16_t vlantag;
   8767 	bool more = false;
   8768 
   8769 	KASSERT(mutex_owned(rxq->rxq_lock));
   8770 
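         	/*
         	 * Walk the RX ring from rxq_ptr, stopping at the first
         	 * descriptor the chip has not completed or when the
         	 * processing limit is hit.
         	 */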
   8771 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8772 		if (limit-- == 0) {
   8773 			rxq->rxq_ptr = i;
   8774 			more = true;
   8775 			DPRINTF(WM_DEBUG_RX,
   8776 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8777 				device_xname(sc->sc_dev), i));
   8778 			break;
   8779 		}
   8780 
   8781 		rxs = &rxq->rxq_soft[i];
   8782 
   8783 		DPRINTF(WM_DEBUG_RX,
   8784 		    ("%s: RX: checking descriptor %d\n",
   8785 			device_xname(sc->sc_dev), i));
   8786 		wm_cdrxsync(rxq, i,
   8787 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8788 
   8789 		status = wm_rxdesc_get_status(rxq, i);
   8790 		errors = wm_rxdesc_get_errors(rxq, i);
   8791 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8792 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8793 #ifdef WM_DEBUG
   8794 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8795 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8796 #endif
   8797 
   8798 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8799 			/*
    8800 			 * Update the receive pointer while holding
    8801 			 * rxq_lock, keeping it consistent with the counters.
   8802 			 */
   8803 			rxq->rxq_ptr = i;
   8804 			break;
   8805 		}
   8806 
   8807 		count++;
   8808 		if (__predict_false(rxq->rxq_discard)) {
   8809 			DPRINTF(WM_DEBUG_RX,
   8810 			    ("%s: RX: discarding contents of descriptor %d\n",
   8811 				device_xname(sc->sc_dev), i));
   8812 			wm_init_rxdesc(rxq, i);
   8813 			if (wm_rxdesc_is_eop(rxq, status)) {
   8814 				/* Reset our state. */
   8815 				DPRINTF(WM_DEBUG_RX,
   8816 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8817 					device_xname(sc->sc_dev)));
   8818 				rxq->rxq_discard = 0;
   8819 			}
   8820 			continue;
   8821 		}
   8822 
   8823 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8824 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8825 
   8826 		m = rxs->rxs_mbuf;
   8827 
   8828 		/*
   8829 		 * Add a new receive buffer to the ring, unless of
   8830 		 * course the length is zero. Treat the latter as a
   8831 		 * failed mapping.
   8832 		 */
   8833 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8834 			/*
   8835 			 * Failed, throw away what we've done so
   8836 			 * far, and discard the rest of the packet.
   8837 			 */
   8838 			ifp->if_ierrors++;
   8839 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8840 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8841 			wm_init_rxdesc(rxq, i);
   8842 			if (!wm_rxdesc_is_eop(rxq, status))
   8843 				rxq->rxq_discard = 1;
   8844 			if (rxq->rxq_head != NULL)
   8845 				m_freem(rxq->rxq_head);
   8846 			WM_RXCHAIN_RESET(rxq);
   8847 			DPRINTF(WM_DEBUG_RX,
   8848 			    ("%s: RX: Rx buffer allocation failed, "
   8849 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8850 				rxq->rxq_discard ? " (discard)" : ""));
   8851 			continue;
   8852 		}
   8853 
   8854 		m->m_len = len;
   8855 		rxq->rxq_len += len;
   8856 		DPRINTF(WM_DEBUG_RX,
   8857 		    ("%s: RX: buffer at %p len %d\n",
   8858 			device_xname(sc->sc_dev), m->m_data, len));
   8859 
   8860 		/* If this is not the end of the packet, keep looking. */
   8861 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8862 			WM_RXCHAIN_LINK(rxq, m);
   8863 			DPRINTF(WM_DEBUG_RX,
   8864 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8865 				device_xname(sc->sc_dev), rxq->rxq_len));
   8866 			continue;
   8867 		}
   8868 
   8869 		/*
    8870 		 * Okay, we have the entire packet now. The chip is
    8871 		 * configured to include the FCS except on I350/I354 and
    8872 		 * I21[01] (not all chips can be configured to strip it),
    8873 		 * so we need to trim it. We may also need to adjust the
    8874 		 * length of the previous mbuf in the chain if the current
    8875 		 * mbuf is too short.
    8876 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8877 		 * register is always set on I350, so we don't trim there.
   8878 		 */
   8879 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8880 		    && (sc->sc_type != WM_T_I210)
   8881 		    && (sc->sc_type != WM_T_I211)) {
   8882 			if (m->m_len < ETHER_CRC_LEN) {
   8883 				rxq->rxq_tail->m_len
   8884 				    -= (ETHER_CRC_LEN - m->m_len);
   8885 				m->m_len = 0;
   8886 			} else
   8887 				m->m_len -= ETHER_CRC_LEN;
   8888 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8889 		} else
   8890 			len = rxq->rxq_len;
   8891 
   8892 		WM_RXCHAIN_LINK(rxq, m);
   8893 
   8894 		*rxq->rxq_tailp = NULL;
   8895 		m = rxq->rxq_head;
   8896 
   8897 		WM_RXCHAIN_RESET(rxq);
   8898 
   8899 		DPRINTF(WM_DEBUG_RX,
   8900 		    ("%s: RX: have entire packet, len -> %d\n",
   8901 			device_xname(sc->sc_dev), len));
   8902 
   8903 		/* If an error occurred, update stats and drop the packet. */
   8904 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8905 			m_freem(m);
   8906 			continue;
   8907 		}
   8908 
   8909 		/* No errors.  Receive the packet. */
   8910 		m_set_rcvif(m, ifp);
   8911 		m->m_pkthdr.len = len;
   8912 		/*
   8913 		 * TODO
    8914 		 * We should save the rsshash and rsstype in this mbuf.
   8915 		 */
   8916 		DPRINTF(WM_DEBUG_RX,
   8917 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8918 			device_xname(sc->sc_dev), rsstype, rsshash));
   8919 
   8920 		/*
   8921 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8922 		 * for us.  Associate the tag with the packet.
   8923 		 */
   8924 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8925 			continue;
   8926 
   8927 		/* Set up checksum info for this packet. */
   8928 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8929 		/*
    8930 		 * Update the receive pointer while holding rxq_lock,
    8931 		 * keeping it consistent with the counters.
   8932 		 */
   8933 		rxq->rxq_ptr = i;
   8934 		rxq->rxq_packets++;
   8935 		rxq->rxq_bytes += len;
   8936 		mutex_exit(rxq->rxq_lock);
   8937 
   8938 		/* Pass it on. */
   8939 		if_percpuq_enqueue(sc->sc_ipq, m);
   8940 
   8941 		mutex_enter(rxq->rxq_lock);
   8942 
   8943 		if (rxq->rxq_stopping)
   8944 			break;
   8945 	}
   8946 
   8947 	if (count != 0)
   8948 		rnd_add_uint32(&sc->rnd_source, count);
   8949 
   8950 	DPRINTF(WM_DEBUG_RX,
   8951 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8952 
   8953 	return more;
   8954 }
   8955 
   8956 /*
   8957  * wm_linkintr_gmii:
   8958  *
   8959  *	Helper; handle link interrupts for GMII.
   8960  */
   8961 static void
   8962 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8963 {
   8964 	device_t dev = sc->sc_dev;
   8965 	uint32_t status, reg;
   8966 	bool link;
   8967 	int rv;
   8968 
   8969 	KASSERT(WM_CORE_LOCKED(sc));
   8970 
   8971 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8972 		__func__));
   8973 
   8974 	if ((icr & ICR_LSC) == 0) {
   8975 		if (icr & ICR_RXSEQ)
   8976 			DPRINTF(WM_DEBUG_LINK,
   8977 			    ("%s: LINK Receive sequence error\n",
   8978 				device_xname(dev)));
   8979 		return;
   8980 	}
   8981 
   8982 	/* Link status changed */
   8983 	status = CSR_READ(sc, WMREG_STATUS);
   8984 	link = status & STATUS_LU;
   8985 	if (link) {
   8986 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8987 			device_xname(dev),
   8988 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8989 	} else {
   8990 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8991 			device_xname(dev)));
   8992 	}
   8993 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8994 		wm_gig_downshift_workaround_ich8lan(sc);
   8995 
   8996 	if ((sc->sc_type == WM_T_ICH8)
   8997 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8998 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8999 	}
   9000 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9001 		device_xname(dev)));
   9002 	mii_pollstat(&sc->sc_mii);
   9003 	if (sc->sc_type == WM_T_82543) {
   9004 		int miistatus, active;
   9005 
   9006 		/*
   9007 		 * With 82543, we need to force speed and
   9008 		 * duplex on the MAC equal to what the PHY
   9009 		 * speed and duplex configuration is.
   9010 		 */
   9011 		miistatus = sc->sc_mii.mii_media_status;
   9012 
   9013 		if (miistatus & IFM_ACTIVE) {
   9014 			active = sc->sc_mii.mii_media_active;
   9015 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9016 			switch (IFM_SUBTYPE(active)) {
   9017 			case IFM_10_T:
   9018 				sc->sc_ctrl |= CTRL_SPEED_10;
   9019 				break;
   9020 			case IFM_100_TX:
   9021 				sc->sc_ctrl |= CTRL_SPEED_100;
   9022 				break;
   9023 			case IFM_1000_T:
   9024 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9025 				break;
   9026 			default:
   9027 				/*
   9028 				 * Fiber?
    9029 				 * Should not enter here.
   9030 				 */
   9031 				printf("unknown media (%x)\n", active);
   9032 				break;
   9033 			}
   9034 			if (active & IFM_FDX)
   9035 				sc->sc_ctrl |= CTRL_FD;
   9036 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9037 		}
   9038 	} else if (sc->sc_type == WM_T_PCH) {
   9039 		wm_k1_gig_workaround_hv(sc,
   9040 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9041 	}
   9042 
   9043 	/*
   9044 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9045 	 * aggressive resulting in many collisions. To avoid this, increase
   9046 	 * the IPG and reduce Rx latency in the PHY.
   9047 	 */
   9048 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9049 	    && link) {
   9050 		uint32_t tipg_reg;
   9051 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9052 		bool fdx;
   9053 		uint16_t emi_addr, emi_val;
   9054 
   9055 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9056 		tipg_reg &= ~TIPG_IPGT_MASK;
   9057 		fdx = status & STATUS_FD;
   9058 
   9059 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9060 			tipg_reg |= 0xff;
   9061 			/* Reduce Rx latency in analog PHY */
   9062 			emi_val = 0;
   9063 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9064 		    fdx && speed != STATUS_SPEED_1000) {
   9065 			tipg_reg |= 0xc;
   9066 			emi_val = 1;
   9067 		} else {
    9068 			/* Roll back to the default values */
   9069 			tipg_reg |= 0x08;
   9070 			emi_val = 1;
   9071 		}
   9072 
   9073 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9074 
   9075 		rv = sc->phy.acquire(sc);
   9076 		if (rv)
   9077 			return;
   9078 
   9079 		if (sc->sc_type == WM_T_PCH2)
   9080 			emi_addr = I82579_RX_CONFIG;
   9081 		else
   9082 			emi_addr = I217_RX_CONFIG;
   9083 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9084 
   9085 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9086 			uint16_t phy_reg;
   9087 
   9088 			sc->phy.readreg_locked(dev, 2,
   9089 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9090 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9091 			if (speed == STATUS_SPEED_100
   9092 			    || speed == STATUS_SPEED_10)
   9093 				phy_reg |= 0x3e8;
   9094 			else
   9095 				phy_reg |= 0xfa;
   9096 			sc->phy.writereg_locked(dev, 2,
   9097 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9098 
   9099 			if (speed == STATUS_SPEED_1000) {
   9100 				sc->phy.readreg_locked(dev, 2,
   9101 				    HV_PM_CTRL, &phy_reg);
   9102 
   9103 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9104 
   9105 				sc->phy.writereg_locked(dev, 2,
   9106 				    HV_PM_CTRL, phy_reg);
   9107 			}
   9108 		}
   9109 		sc->phy.release(sc);
   9110 
   9111 		if (rv)
   9112 			return;
   9113 
   9114 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9115 			uint16_t data, ptr_gap;
   9116 
   9117 			if (speed == STATUS_SPEED_1000) {
   9118 				rv = sc->phy.acquire(sc);
   9119 				if (rv)
   9120 					return;
   9121 
   9122 				rv = sc->phy.readreg_locked(dev, 2,
   9123 				    I219_UNKNOWN1, &data);
   9124 				if (rv) {
   9125 					sc->phy.release(sc);
   9126 					return;
   9127 				}
   9128 
   9129 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9130 				if (ptr_gap < 0x18) {
   9131 					data &= ~(0x3ff << 2);
   9132 					data |= (0x18 << 2);
   9133 					rv = sc->phy.writereg_locked(dev,
   9134 					    2, I219_UNKNOWN1, data);
   9135 				}
   9136 				sc->phy.release(sc);
   9137 				if (rv)
   9138 					return;
   9139 			} else {
   9140 				rv = sc->phy.acquire(sc);
   9141 				if (rv)
   9142 					return;
   9143 
   9144 				rv = sc->phy.writereg_locked(dev, 2,
   9145 				    I219_UNKNOWN1, 0xc023);
   9146 				sc->phy.release(sc);
   9147 				if (rv)
   9148 					return;
   9149 
   9150 			}
   9151 		}
   9152 	}
   9153 
   9154 	/*
   9155 	 * I217 Packet Loss issue:
    9156 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9157 	 * on power up.
    9158 	 * Set the Beacon Duration for I217 to 8 usec.
   9159 	 */
   9160 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9161 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9162 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9163 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9164 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9165 	}
   9166 
   9167 	/* Work-around I218 hang issue */
   9168 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9169 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9170 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9171 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9172 		wm_k1_workaround_lpt_lp(sc, link);
   9173 
   9174 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9175 		/*
   9176 		 * Set platform power management values for Latency
   9177 		 * Tolerance Reporting (LTR)
   9178 		 */
   9179 		wm_platform_pm_pch_lpt(sc,
   9180 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9181 	}
   9182 
   9183 	/* Clear link partner's EEE ability */
   9184 	sc->eee_lp_ability = 0;
   9185 
   9186 	/* FEXTNVM6 K1-off workaround */
   9187 	if (sc->sc_type == WM_T_PCH_SPT) {
   9188 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9189 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9190 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9191 		else
   9192 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9193 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9194 	}
   9195 
   9196 	if (!link)
   9197 		return;
   9198 
   9199 	switch (sc->sc_type) {
   9200 	case WM_T_PCH2:
   9201 		wm_k1_workaround_lv(sc);
   9202 		/* FALLTHROUGH */
   9203 	case WM_T_PCH:
   9204 		if (sc->sc_phytype == WMPHY_82578)
   9205 			wm_link_stall_workaround_hv(sc);
   9206 		break;
   9207 	default:
   9208 		break;
   9209 	}
   9210 
   9211 	/* Enable/Disable EEE after link up */
   9212 	if (sc->sc_phytype > WMPHY_82579)
   9213 		wm_set_eee_pchlan(sc);
   9214 }
   9215 
   9216 /*
   9217  * wm_linkintr_tbi:
   9218  *
   9219  *	Helper; handle link interrupts for TBI mode.
   9220  */
   9221 static void
   9222 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9223 {
   9224 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9225 	uint32_t status;
   9226 
   9227 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9228 		__func__));
   9229 
   9230 	status = CSR_READ(sc, WMREG_STATUS);
   9231 	if (icr & ICR_LSC) {
   9232 		wm_check_for_link(sc);
   9233 		if (status & STATUS_LU) {
   9234 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9235 				device_xname(sc->sc_dev),
   9236 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9237 			/*
   9238 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9239 			 * so we should update sc->sc_ctrl
   9240 			 */
   9241 
   9242 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9243 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9244 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9245 			if (status & STATUS_FD)
   9246 				sc->sc_tctl |=
   9247 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9248 			else
   9249 				sc->sc_tctl |=
   9250 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9251 			if (sc->sc_ctrl & CTRL_TFCE)
   9252 				sc->sc_fcrtl |= FCRTL_XONE;
   9253 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9254 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9255 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9256 			sc->sc_tbi_linkup = 1;
   9257 			if_link_state_change(ifp, LINK_STATE_UP);
   9258 		} else {
   9259 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9260 				device_xname(sc->sc_dev)));
   9261 			sc->sc_tbi_linkup = 0;
   9262 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9263 		}
   9264 		/* Update LED */
   9265 		wm_tbi_serdes_set_linkled(sc);
   9266 	} else if (icr & ICR_RXSEQ)
   9267 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9268 			device_xname(sc->sc_dev)));
   9269 }
   9270 
   9271 /*
   9272  * wm_linkintr_serdes:
   9273  *
    9274  *	Helper; handle link interrupts for SERDES mode.
   9275  */
   9276 static void
   9277 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9278 {
   9279 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9280 	struct mii_data *mii = &sc->sc_mii;
   9281 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9282 	uint32_t pcs_adv, pcs_lpab, reg;
   9283 
   9284 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9285 		__func__));
   9286 
   9287 	if (icr & ICR_LSC) {
   9288 		/* Check PCS */
   9289 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9290 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9291 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9292 				device_xname(sc->sc_dev)));
   9293 			mii->mii_media_status |= IFM_ACTIVE;
   9294 			sc->sc_tbi_linkup = 1;
   9295 			if_link_state_change(ifp, LINK_STATE_UP);
   9296 		} else {
   9297 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9298 				device_xname(sc->sc_dev)));
   9299 			mii->mii_media_status |= IFM_NONE;
   9300 			sc->sc_tbi_linkup = 0;
   9301 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9302 			wm_tbi_serdes_set_linkled(sc);
   9303 			return;
   9304 		}
   9305 		mii->mii_media_active |= IFM_1000_SX;
   9306 		if ((reg & PCS_LSTS_FDX) != 0)
   9307 			mii->mii_media_active |= IFM_FDX;
   9308 		else
   9309 			mii->mii_media_active |= IFM_HDX;
   9310 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9311 			/* Check flow */
   9312 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9313 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9314 				DPRINTF(WM_DEBUG_LINK,
   9315 				    ("XXX LINKOK but not ACOMP\n"));
   9316 				return;
   9317 			}
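         			/*
         			 * Resolve flow control per IEEE 802.3 Annex 28B:
         			 * symmetric pause if both sides advertise it,
         			 * otherwise honor the asymmetric combinations.
         			 */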
   9318 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9319 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9320 			DPRINTF(WM_DEBUG_LINK,
   9321 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9322 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9323 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9324 				mii->mii_media_active |= IFM_FLOW
   9325 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9326 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9327 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9328 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9329 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9330 				mii->mii_media_active |= IFM_FLOW
   9331 				    | IFM_ETH_TXPAUSE;
   9332 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9333 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9334 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9335 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9336 				mii->mii_media_active |= IFM_FLOW
   9337 				    | IFM_ETH_RXPAUSE;
   9338 		}
   9339 		/* Update LED */
   9340 		wm_tbi_serdes_set_linkled(sc);
   9341 	} else
   9342 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9343 		    device_xname(sc->sc_dev)));
   9344 }
   9345 
   9346 /*
   9347  * wm_linkintr:
   9348  *
   9349  *	Helper; handle link interrupts.
   9350  */
   9351 static void
   9352 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9353 {
   9354 
   9355 	KASSERT(WM_CORE_LOCKED(sc));
   9356 
   9357 	if (sc->sc_flags & WM_F_HAS_MII)
   9358 		wm_linkintr_gmii(sc, icr);
   9359 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9360 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9361 		wm_linkintr_serdes(sc, icr);
   9362 	else
   9363 		wm_linkintr_tbi(sc, icr);
   9364 }
   9365 
   9366 /*
   9367  * wm_intr_legacy:
   9368  *
   9369  *	Interrupt service routine for INTx and MSI.
   9370  */
   9371 static int
   9372 wm_intr_legacy(void *arg)
   9373 {
   9374 	struct wm_softc *sc = arg;
   9375 	struct wm_queue *wmq = &sc->sc_queue[0];
   9376 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9377 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9378 	uint32_t icr, rndval = 0;
   9379 	int handled = 0;
   9380 
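         	/* ICR is clear-on-read; loop until no enabled cause remains. */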
   9381 	while (1 /* CONSTCOND */) {
   9382 		icr = CSR_READ(sc, WMREG_ICR);
   9383 		if ((icr & sc->sc_icr) == 0)
   9384 			break;
   9385 		if (handled == 0)
   9386 			DPRINTF(WM_DEBUG_TX,
    9387 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9388 		if (rndval == 0)
   9389 			rndval = icr;
   9390 
   9391 		mutex_enter(rxq->rxq_lock);
   9392 
   9393 		if (rxq->rxq_stopping) {
   9394 			mutex_exit(rxq->rxq_lock);
   9395 			break;
   9396 		}
   9397 
   9398 		handled = 1;
   9399 
   9400 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9401 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9402 			DPRINTF(WM_DEBUG_RX,
   9403 			    ("%s: RX: got Rx intr 0x%08x\n",
   9404 				device_xname(sc->sc_dev),
   9405 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9406 			WM_Q_EVCNT_INCR(rxq, intr);
   9407 		}
   9408 #endif
   9409 		/*
   9410 		 * wm_rxeof() does *not* call upper layer functions directly,
    9411 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9412 		 * So, we can call wm_rxeof() in interrupt context.
   9413 		 */
   9414 		wm_rxeof(rxq, UINT_MAX);
   9415 
   9416 		mutex_exit(rxq->rxq_lock);
   9417 		mutex_enter(txq->txq_lock);
   9418 
   9419 		if (txq->txq_stopping) {
   9420 			mutex_exit(txq->txq_lock);
   9421 			break;
   9422 		}
   9423 
   9424 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9425 		if (icr & ICR_TXDW) {
   9426 			DPRINTF(WM_DEBUG_TX,
   9427 			    ("%s: TX: got TXDW interrupt\n",
   9428 				device_xname(sc->sc_dev)));
   9429 			WM_Q_EVCNT_INCR(txq, txdw);
   9430 		}
   9431 #endif
   9432 		wm_txeof(txq, UINT_MAX);
   9433 
   9434 		mutex_exit(txq->txq_lock);
   9435 		WM_CORE_LOCK(sc);
   9436 
   9437 		if (sc->sc_core_stopping) {
   9438 			WM_CORE_UNLOCK(sc);
   9439 			break;
   9440 		}
   9441 
   9442 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9443 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9444 			wm_linkintr(sc, icr);
   9445 		}
   9446 
   9447 		WM_CORE_UNLOCK(sc);
   9448 
   9449 		if (icr & ICR_RXO) {
   9450 #if defined(WM_DEBUG)
   9451 			log(LOG_WARNING, "%s: Receive overrun\n",
   9452 			    device_xname(sc->sc_dev));
   9453 #endif /* defined(WM_DEBUG) */
   9454 		}
   9455 	}
   9456 
   9457 	rnd_add_uint32(&sc->rnd_source, rndval);
   9458 
   9459 	if (handled) {
   9460 		/* Try to get more packets going. */
   9461 		softint_schedule(wmq->wmq_si);
   9462 	}
   9463 
   9464 	return handled;
   9465 }
   9466 
   9467 static inline void
   9468 wm_txrxintr_disable(struct wm_queue *wmq)
   9469 {
   9470 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9471 
   9472 	if (sc->sc_type == WM_T_82574)
   9473 		CSR_WRITE(sc, WMREG_IMC,
   9474 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9475 	else if (sc->sc_type == WM_T_82575)
   9476 		CSR_WRITE(sc, WMREG_EIMC,
   9477 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9478 	else
   9479 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9480 }
   9481 
   9482 static inline void
   9483 wm_txrxintr_enable(struct wm_queue *wmq)
   9484 {
   9485 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9486 
   9487 	wm_itrs_calculate(sc, wmq);
   9488 
   9489 	/*
    9490 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9491 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9492 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9493 	 * its wm_handle_queue(wmq) is running.
   9494 	 */
   9495 	if (sc->sc_type == WM_T_82574)
   9496 		CSR_WRITE(sc, WMREG_IMS,
   9497 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9498 	else if (sc->sc_type == WM_T_82575)
   9499 		CSR_WRITE(sc, WMREG_EIMS,
   9500 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9501 	else
   9502 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9503 }
   9504 
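/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queues for MSI-X. The
 *	queue's interrupt sources stay masked while up to txlimit/rxlimit
 *	descriptors are processed; any remaining work is deferred to the
 *	wm_handle_queue() softint, and the interrupt is re-enabled only
 *	once both rings are caught up.
 */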
   9505 static int
   9506 wm_txrxintr_msix(void *arg)
   9507 {
   9508 	struct wm_queue *wmq = arg;
   9509 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9510 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9511 	struct wm_softc *sc = txq->txq_sc;
   9512 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9513 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9514 	bool txmore;
   9515 	bool rxmore;
   9516 
   9517 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9518 
   9519 	DPRINTF(WM_DEBUG_TX,
   9520 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9521 
   9522 	wm_txrxintr_disable(wmq);
   9523 
   9524 	mutex_enter(txq->txq_lock);
   9525 
   9526 	if (txq->txq_stopping) {
   9527 		mutex_exit(txq->txq_lock);
   9528 		return 0;
   9529 	}
   9530 
   9531 	WM_Q_EVCNT_INCR(txq, txdw);
   9532 	txmore = wm_txeof(txq, txlimit);
    9533 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9534 	mutex_exit(txq->txq_lock);
   9535 
   9536 	DPRINTF(WM_DEBUG_RX,
   9537 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9538 	mutex_enter(rxq->rxq_lock);
   9539 
   9540 	if (rxq->rxq_stopping) {
   9541 		mutex_exit(rxq->rxq_lock);
   9542 		return 0;
   9543 	}
   9544 
   9545 	WM_Q_EVCNT_INCR(rxq, intr);
   9546 	rxmore = wm_rxeof(rxq, rxlimit);
   9547 	mutex_exit(rxq->rxq_lock);
   9548 
   9549 	wm_itrs_writereg(sc, wmq);
   9550 
   9551 	if (txmore || rxmore)
   9552 		softint_schedule(wmq->wmq_si);
   9553 	else
   9554 		wm_txrxintr_enable(wmq);
   9555 
   9556 	return 1;
   9557 }
   9558 
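/*
 * wm_handle_queue:
 *
 *	Softint handler that continues the Tx/Rx processing deferred from
 *	wm_txrxintr_msix(). It reschedules itself while work remains and
 *	re-enables the queue pair's interrupt when both rings are drained.
 */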
   9559 static void
   9560 wm_handle_queue(void *arg)
   9561 {
   9562 	struct wm_queue *wmq = arg;
   9563 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9564 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9565 	struct wm_softc *sc = txq->txq_sc;
   9566 	u_int txlimit = sc->sc_tx_process_limit;
   9567 	u_int rxlimit = sc->sc_rx_process_limit;
   9568 	bool txmore;
   9569 	bool rxmore;
   9570 
   9571 	mutex_enter(txq->txq_lock);
   9572 	if (txq->txq_stopping) {
   9573 		mutex_exit(txq->txq_lock);
   9574 		return;
   9575 	}
   9576 	txmore = wm_txeof(txq, txlimit);
   9577 	wm_deferred_start_locked(txq);
   9578 	mutex_exit(txq->txq_lock);
   9579 
   9580 	mutex_enter(rxq->rxq_lock);
   9581 	if (rxq->rxq_stopping) {
   9582 		mutex_exit(rxq->rxq_lock);
   9583 		return;
   9584 	}
   9585 	WM_Q_EVCNT_INCR(rxq, defer);
   9586 	rxmore = wm_rxeof(rxq, rxlimit);
   9587 	mutex_exit(rxq->rxq_lock);
   9588 
   9589 	if (txmore || rxmore)
   9590 		softint_schedule(wmq->wmq_si);
   9591 	else
   9592 		wm_txrxintr_enable(wmq);
   9593 }
   9594 
   9595 /*
   9596  * wm_linkintr_msix:
   9597  *
   9598  *	Interrupt service routine for link status change for MSI-X.
   9599  */
   9600 static int
   9601 wm_linkintr_msix(void *arg)
   9602 {
   9603 	struct wm_softc *sc = arg;
   9604 	uint32_t reg;
    9605 	bool has_rxo = false;
   9606 
   9607 	DPRINTF(WM_DEBUG_LINK,
   9608 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9609 
   9610 	reg = CSR_READ(sc, WMREG_ICR);
   9611 	WM_CORE_LOCK(sc);
   9612 	if (sc->sc_core_stopping)
   9613 		goto out;
   9614 
   9615 	if ((reg & ICR_LSC) != 0) {
   9616 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9617 		wm_linkintr(sc, ICR_LSC);
   9618 	}
   9619 
    9620 	/*
    9621 	 * XXX 82574 MSI-X mode workaround
    9622 	 *
    9623 	 * In MSI-X mode, the 82574 raises the receive overrun (RXO)
    9624 	 * interrupt on the ICR_OTHER MSI-X vector and raises neither the
    9625 	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector. So, generate ICR_RXQ(0) and
    9626 	 * ICR_RXQ(1) interrupts by writing WMREG_ICS to process rx packets.
    9627 	 */
   9628 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9629 #if defined(WM_DEBUG)
   9630 		log(LOG_WARNING, "%s: Receive overrun\n",
   9631 		    device_xname(sc->sc_dev));
   9632 #endif /* defined(WM_DEBUG) */
   9633 
   9634 		has_rxo = true;
    9635 		/*
    9636 		 * The RXO interrupt fires at a very high rate when the
    9637 		 * receive traffic rate is high. Use polling mode for
    9638 		 * ICR_OTHER just as for the Tx/Rx interrupts: ICR_OTHER is
    9639 		 * re-enabled at the end of wm_txrxintr_msix(), which is
    9640 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9641 		 */
   9642 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9643 
   9644 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9645 	}
    9646 
   9649 out:
   9650 	WM_CORE_UNLOCK(sc);
   9651 
   9652 	if (sc->sc_type == WM_T_82574) {
   9653 		if (!has_rxo)
   9654 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9655 		else
   9656 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9657 	} else if (sc->sc_type == WM_T_82575)
   9658 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9659 	else
   9660 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9661 
   9662 	return 1;
   9663 }
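
/*
 * A note on the re-enable policy above: on the 82574, ICR_OTHER stays
 * masked after an RXO so that a receive overrun storm cannot keep this
 * handler busy (it is re-enabled later from wm_txrxintr_enable()), while
 * ICR_LSC is always re-armed here. The other MACs simply re-enable their
 * link vector through EIMS.
 */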
   9664 
   9665 /*
   9666  * Media related.
   9667  * GMII, SGMII, TBI (and SERDES)
   9668  */
   9669 
   9670 /* Common */
   9671 
   9672 /*
   9673  * wm_tbi_serdes_set_linkled:
   9674  *
   9675  *	Update the link LED on TBI and SERDES devices.
   9676  */
   9677 static void
   9678 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9679 {
   9680 
   9681 	if (sc->sc_tbi_linkup)
   9682 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9683 	else
   9684 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9685 
    9686 	/* 82540 and newer devices are active-low */
   9687 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9688 
   9689 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9690 }
   9691 
   9692 /* GMII related */
   9693 
   9694 /*
   9695  * wm_gmii_reset:
   9696  *
   9697  *	Reset the PHY.
   9698  */
   9699 static void
   9700 wm_gmii_reset(struct wm_softc *sc)
   9701 {
   9702 	uint32_t reg;
   9703 	int rv;
   9704 
   9705 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9706 		device_xname(sc->sc_dev), __func__));
   9707 
   9708 	rv = sc->phy.acquire(sc);
   9709 	if (rv != 0) {
   9710 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9711 		    __func__);
   9712 		return;
   9713 	}
   9714 
   9715 	switch (sc->sc_type) {
   9716 	case WM_T_82542_2_0:
   9717 	case WM_T_82542_2_1:
   9718 		/* null */
   9719 		break;
   9720 	case WM_T_82543:
   9721 		/*
   9722 		 * With 82543, we need to force speed and duplex on the MAC
   9723 		 * equal to what the PHY speed and duplex configuration is.
   9724 		 * In addition, we need to perform a hardware reset on the PHY
   9725 		 * to take it out of reset.
   9726 		 */
   9727 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9728 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9729 
   9730 		/* The PHY reset pin is active-low. */
   9731 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9732 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9733 		    CTRL_EXT_SWDPIN(4));
   9734 		reg |= CTRL_EXT_SWDPIO(4);
   9735 
   9736 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9737 		CSR_WRITE_FLUSH(sc);
   9738 		delay(10*1000);
   9739 
   9740 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9741 		CSR_WRITE_FLUSH(sc);
   9742 		delay(150);
   9743 #if 0
   9744 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9745 #endif
   9746 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9747 		break;
   9748 	case WM_T_82544:	/* Reset 10000us */
   9749 	case WM_T_82540:
   9750 	case WM_T_82545:
   9751 	case WM_T_82545_3:
   9752 	case WM_T_82546:
   9753 	case WM_T_82546_3:
   9754 	case WM_T_82541:
   9755 	case WM_T_82541_2:
   9756 	case WM_T_82547:
   9757 	case WM_T_82547_2:
   9758 	case WM_T_82571:	/* Reset 100us */
   9759 	case WM_T_82572:
   9760 	case WM_T_82573:
   9761 	case WM_T_82574:
   9762 	case WM_T_82575:
   9763 	case WM_T_82576:
   9764 	case WM_T_82580:
   9765 	case WM_T_I350:
   9766 	case WM_T_I354:
   9767 	case WM_T_I210:
   9768 	case WM_T_I211:
   9769 	case WM_T_82583:
   9770 	case WM_T_80003:
   9771 		/* Generic reset */
   9772 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9773 		CSR_WRITE_FLUSH(sc);
   9774 		delay(20000);
   9775 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9776 		CSR_WRITE_FLUSH(sc);
   9777 		delay(20000);
   9778 
   9779 		if ((sc->sc_type == WM_T_82541)
   9780 		    || (sc->sc_type == WM_T_82541_2)
   9781 		    || (sc->sc_type == WM_T_82547)
   9782 		    || (sc->sc_type == WM_T_82547_2)) {
    9783 			/* Workarounds for IGP PHYs are done in igp_reset() */
   9784 			/* XXX add code to set LED after phy reset */
   9785 		}
   9786 		break;
   9787 	case WM_T_ICH8:
   9788 	case WM_T_ICH9:
   9789 	case WM_T_ICH10:
   9790 	case WM_T_PCH:
   9791 	case WM_T_PCH2:
   9792 	case WM_T_PCH_LPT:
   9793 	case WM_T_PCH_SPT:
   9794 	case WM_T_PCH_CNP:
   9795 		/* Generic reset */
   9796 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9797 		CSR_WRITE_FLUSH(sc);
   9798 		delay(100);
   9799 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9800 		CSR_WRITE_FLUSH(sc);
   9801 		delay(150);
   9802 		break;
   9803 	default:
   9804 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9805 		    __func__);
   9806 		break;
   9807 	}
   9808 
   9809 	sc->phy.release(sc);
   9810 
   9811 	/* get_cfg_done */
   9812 	wm_get_cfg_done(sc);
   9813 
   9814 	/* Extra setup */
   9815 	switch (sc->sc_type) {
   9816 	case WM_T_82542_2_0:
   9817 	case WM_T_82542_2_1:
   9818 	case WM_T_82543:
   9819 	case WM_T_82544:
   9820 	case WM_T_82540:
   9821 	case WM_T_82545:
   9822 	case WM_T_82545_3:
   9823 	case WM_T_82546:
   9824 	case WM_T_82546_3:
   9825 	case WM_T_82541_2:
   9826 	case WM_T_82547_2:
   9827 	case WM_T_82571:
   9828 	case WM_T_82572:
   9829 	case WM_T_82573:
   9830 	case WM_T_82574:
   9831 	case WM_T_82583:
   9832 	case WM_T_82575:
   9833 	case WM_T_82576:
   9834 	case WM_T_82580:
   9835 	case WM_T_I350:
   9836 	case WM_T_I354:
   9837 	case WM_T_I210:
   9838 	case WM_T_I211:
   9839 	case WM_T_80003:
   9840 		/* Null */
   9841 		break;
   9842 	case WM_T_82541:
   9843 	case WM_T_82547:
    9844 		/* XXX Actively configure the LED after PHY reset */
   9845 		break;
   9846 	case WM_T_ICH8:
   9847 	case WM_T_ICH9:
   9848 	case WM_T_ICH10:
   9849 	case WM_T_PCH:
   9850 	case WM_T_PCH2:
   9851 	case WM_T_PCH_LPT:
   9852 	case WM_T_PCH_SPT:
   9853 	case WM_T_PCH_CNP:
   9854 		wm_phy_post_reset(sc);
   9855 		break;
   9856 	default:
   9857 		panic("%s: unknown type\n", __func__);
   9858 		break;
   9859 	}
   9860 }
   9861 
    9862 /*
    9863  * Set up sc_phytype and mii_{read|write}reg.
    9864  *
    9865  *  To identify the PHY type, the correct read/write functions must be
    9866  * selected, and to select them only the PCI ID or the MAC type is
    9867  * available, because the PHY registers cannot be accessed yet.
    9868  *
    9869  *  On the first call of this function, the PHY ID is not known yet, so
    9870  * the PCI ID or the MAC type is checked. The list of PCI IDs may not be
    9871  * complete, so the result might be incorrect.
    9872  *
    9873  *  On the second call, the PHY OUI and model are used to identify the
    9874  * PHY type. This might still not be perfect because some entries are
    9875  * missing from the comparison, but it is better than the first guess.
    9876  *
    9877  *  If the newly detected result differs from the previous assumption,
    9878  * a diagnostic message is printed.
    9879  */
   9880 static void
   9881 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9882     uint16_t phy_model)
   9883 {
   9884 	device_t dev = sc->sc_dev;
   9885 	struct mii_data *mii = &sc->sc_mii;
   9886 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9887 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9888 	mii_readreg_t new_readreg;
   9889 	mii_writereg_t new_writereg;
   9890 
   9891 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9892 		device_xname(sc->sc_dev), __func__));
   9893 
   9894 	if (mii->mii_readreg == NULL) {
   9895 		/*
   9896 		 *  This is the first call of this function. For ICH and PCH
   9897 		 * variants, it's difficult to determine the PHY access method
   9898 		 * by sc_type, so use the PCI product ID for some devices.
   9899 		 */
   9900 
   9901 		switch (sc->sc_pcidevid) {
   9902 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9903 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9904 			/* 82577 */
   9905 			new_phytype = WMPHY_82577;
   9906 			break;
   9907 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9908 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9909 			/* 82578 */
   9910 			new_phytype = WMPHY_82578;
   9911 			break;
   9912 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9913 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9914 			/* 82579 */
   9915 			new_phytype = WMPHY_82579;
   9916 			break;
   9917 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9918 		case PCI_PRODUCT_INTEL_82801I_BM:
   9919 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9920 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9921 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9922 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9923 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9924 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9925 			/* ICH8, 9, 10 with 82567 */
   9926 			new_phytype = WMPHY_BM;
   9927 			break;
   9928 		default:
   9929 			break;
   9930 		}
   9931 	} else {
   9932 		/* It's not the first call. Use PHY OUI and model */
   9933 		switch (phy_oui) {
   9934 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9935 			switch (phy_model) {
   9936 			case 0x0004: /* XXX */
   9937 				new_phytype = WMPHY_82578;
   9938 				break;
   9939 			default:
   9940 				break;
   9941 			}
   9942 			break;
   9943 		case MII_OUI_xxMARVELL:
   9944 			switch (phy_model) {
   9945 			case MII_MODEL_xxMARVELL_I210:
   9946 				new_phytype = WMPHY_I210;
   9947 				break;
   9948 			case MII_MODEL_xxMARVELL_E1011:
   9949 			case MII_MODEL_xxMARVELL_E1000_3:
   9950 			case MII_MODEL_xxMARVELL_E1000_5:
   9951 			case MII_MODEL_xxMARVELL_E1112:
   9952 				new_phytype = WMPHY_M88;
   9953 				break;
   9954 			case MII_MODEL_xxMARVELL_E1149:
   9955 				new_phytype = WMPHY_BM;
   9956 				break;
   9957 			case MII_MODEL_xxMARVELL_E1111:
   9958 			case MII_MODEL_xxMARVELL_I347:
   9959 			case MII_MODEL_xxMARVELL_E1512:
   9960 			case MII_MODEL_xxMARVELL_E1340M:
   9961 			case MII_MODEL_xxMARVELL_E1543:
   9962 				new_phytype = WMPHY_M88;
   9963 				break;
   9964 			case MII_MODEL_xxMARVELL_I82563:
   9965 				new_phytype = WMPHY_GG82563;
   9966 				break;
   9967 			default:
   9968 				break;
   9969 			}
   9970 			break;
   9971 		case MII_OUI_INTEL:
   9972 			switch (phy_model) {
   9973 			case MII_MODEL_INTEL_I82577:
   9974 				new_phytype = WMPHY_82577;
   9975 				break;
   9976 			case MII_MODEL_INTEL_I82579:
   9977 				new_phytype = WMPHY_82579;
   9978 				break;
   9979 			case MII_MODEL_INTEL_I217:
   9980 				new_phytype = WMPHY_I217;
   9981 				break;
   9982 			case MII_MODEL_INTEL_I82580:
   9983 			case MII_MODEL_INTEL_I350:
   9984 				new_phytype = WMPHY_82580;
   9985 				break;
   9986 			default:
   9987 				break;
   9988 			}
   9989 			break;
   9990 		case MII_OUI_yyINTEL:
   9991 			switch (phy_model) {
   9992 			case MII_MODEL_yyINTEL_I82562G:
   9993 			case MII_MODEL_yyINTEL_I82562EM:
   9994 			case MII_MODEL_yyINTEL_I82562ET:
   9995 				new_phytype = WMPHY_IFE;
   9996 				break;
   9997 			case MII_MODEL_yyINTEL_IGP01E1000:
   9998 				new_phytype = WMPHY_IGP;
   9999 				break;
   10000 			case MII_MODEL_yyINTEL_I82566:
   10001 				new_phytype = WMPHY_IGP_3;
   10002 				break;
   10003 			default:
   10004 				break;
   10005 			}
   10006 			break;
   10007 		default:
   10008 			break;
   10009 		}
   10010 		if (new_phytype == WMPHY_UNKNOWN)
   10011 			aprint_verbose_dev(dev,
   10012 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10013 			    __func__, phy_oui, phy_model);
   10014 
    10015 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10016 		    && (sc->sc_phytype != new_phytype)) {
    10017 			aprint_error_dev(dev, "Previously assumed PHY type "
    10018 			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
    10019 			    sc->sc_phytype, new_phytype);
   10020 		}
   10021 	}
   10022 
   10023 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10024 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10025 		/* SGMII */
   10026 		new_readreg = wm_sgmii_readreg;
   10027 		new_writereg = wm_sgmii_writereg;
   10028 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10029 		/* BM2 (phyaddr == 1) */
   10030 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10031 		    && (new_phytype != WMPHY_BM)
   10032 		    && (new_phytype != WMPHY_UNKNOWN))
   10033 			doubt_phytype = new_phytype;
   10034 		new_phytype = WMPHY_BM;
   10035 		new_readreg = wm_gmii_bm_readreg;
   10036 		new_writereg = wm_gmii_bm_writereg;
   10037 	} else if (sc->sc_type >= WM_T_PCH) {
   10038 		/* All PCH* use _hv_ */
   10039 		new_readreg = wm_gmii_hv_readreg;
   10040 		new_writereg = wm_gmii_hv_writereg;
   10041 	} else if (sc->sc_type >= WM_T_ICH8) {
   10042 		/* non-82567 ICH8, 9 and 10 */
   10043 		new_readreg = wm_gmii_i82544_readreg;
   10044 		new_writereg = wm_gmii_i82544_writereg;
   10045 	} else if (sc->sc_type >= WM_T_80003) {
   10046 		/* 80003 */
   10047 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10048 		    && (new_phytype != WMPHY_GG82563)
   10049 		    && (new_phytype != WMPHY_UNKNOWN))
   10050 			doubt_phytype = new_phytype;
   10051 		new_phytype = WMPHY_GG82563;
   10052 		new_readreg = wm_gmii_i80003_readreg;
   10053 		new_writereg = wm_gmii_i80003_writereg;
   10054 	} else if (sc->sc_type >= WM_T_I210) {
   10055 		/* I210 and I211 */
   10056 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10057 		    && (new_phytype != WMPHY_I210)
   10058 		    && (new_phytype != WMPHY_UNKNOWN))
   10059 			doubt_phytype = new_phytype;
   10060 		new_phytype = WMPHY_I210;
   10061 		new_readreg = wm_gmii_gs40g_readreg;
   10062 		new_writereg = wm_gmii_gs40g_writereg;
   10063 	} else if (sc->sc_type >= WM_T_82580) {
   10064 		/* 82580, I350 and I354 */
   10065 		new_readreg = wm_gmii_82580_readreg;
   10066 		new_writereg = wm_gmii_82580_writereg;
   10067 	} else if (sc->sc_type >= WM_T_82544) {
    10068 		/* 82544, 82540, 8254[1567], 8257[1234] and 82583 */
   10069 		new_readreg = wm_gmii_i82544_readreg;
   10070 		new_writereg = wm_gmii_i82544_writereg;
   10071 	} else {
   10072 		new_readreg = wm_gmii_i82543_readreg;
   10073 		new_writereg = wm_gmii_i82543_writereg;
   10074 	}
   10075 
   10076 	if (new_phytype == WMPHY_BM) {
   10077 		/* All BM use _bm_ */
   10078 		new_readreg = wm_gmii_bm_readreg;
   10079 		new_writereg = wm_gmii_bm_writereg;
   10080 	}
   10081 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10082 		/* All PCH* use _hv_ */
   10083 		new_readreg = wm_gmii_hv_readreg;
   10084 		new_writereg = wm_gmii_hv_writereg;
   10085 	}
   10086 
   10087 	/* Diag output */
   10088 	if (doubt_phytype != WMPHY_UNKNOWN)
   10089 		aprint_error_dev(dev, "Assumed new PHY type was "
   10090 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10091 		    new_phytype);
    10092 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10093 	    && (sc->sc_phytype != new_phytype))
    10094 		aprint_error_dev(dev, "Previously assumed PHY type "
    10095 		    "(%u) was incorrect. New PHY type = %u\n",
    10096 		    sc->sc_phytype, new_phytype);
   10097 
   10098 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10099 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10100 
   10101 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10102 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10103 		    "function was incorrect.\n");
   10104 
   10105 	/* Update now */
   10106 	sc->sc_phytype = new_phytype;
   10107 	mii->mii_readreg = new_readreg;
   10108 	mii->mii_writereg = new_writereg;
   10109 	if (new_readreg == wm_gmii_hv_readreg) {
   10110 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10111 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10112 	} else if (new_readreg == wm_sgmii_readreg) {
   10113 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10114 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10115 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10116 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10117 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10118 	}
   10119 }
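
/*
 * Call-pattern sketch for wm_gmii_setup_phytype(): the first call is the
 * one made while mii->mii_readreg is still NULL, before any PHY register
 * can be read; the second call is made from wm_gmii_mediainit() with the
 * OUI and model of the PHY that mii_attach() actually found.
 */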
   10120 
   10121 /*
   10122  * wm_get_phy_id_82575:
   10123  *
   10124  * Return PHY ID. Return -1 if it failed.
   10125  */
   10126 static int
   10127 wm_get_phy_id_82575(struct wm_softc *sc)
   10128 {
   10129 	uint32_t reg;
   10130 	int phyid = -1;
   10131 
   10132 	/* XXX */
   10133 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10134 		return -1;
   10135 
   10136 	if (wm_sgmii_uses_mdio(sc)) {
   10137 		switch (sc->sc_type) {
   10138 		case WM_T_82575:
   10139 		case WM_T_82576:
   10140 			reg = CSR_READ(sc, WMREG_MDIC);
   10141 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10142 			break;
   10143 		case WM_T_82580:
   10144 		case WM_T_I350:
   10145 		case WM_T_I354:
   10146 		case WM_T_I210:
   10147 		case WM_T_I211:
   10148 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10149 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10150 			break;
   10151 		default:
   10152 			return -1;
   10153 		}
   10154 	}
   10155 
   10156 	return phyid;
   10157 }
   10158 
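/*
 * Note on the register split above: the 82575 and 82576 report the
 * current PHY address in the MDIC register itself, while the 82580 and
 * later (I350/I354/I210/I211) carry it in the separate MDICNFG register.
 */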
   10159 
   10160 /*
   10161  * wm_gmii_mediainit:
   10162  *
   10163  *	Initialize media for use on 1000BASE-T devices.
   10164  */
   10165 static void
   10166 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10167 {
   10168 	device_t dev = sc->sc_dev;
   10169 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10170 	struct mii_data *mii = &sc->sc_mii;
   10171 	uint32_t reg;
   10172 
   10173 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10174 		device_xname(sc->sc_dev), __func__));
   10175 
   10176 	/* We have GMII. */
   10177 	sc->sc_flags |= WM_F_HAS_MII;
   10178 
   10179 	if (sc->sc_type == WM_T_80003)
    10180 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10181 	else
   10182 		sc->sc_tipg = TIPG_1000T_DFLT;
   10183 
   10184 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10185 	if ((sc->sc_type == WM_T_82580)
   10186 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10187 	    || (sc->sc_type == WM_T_I211)) {
   10188 		reg = CSR_READ(sc, WMREG_PHPM);
   10189 		reg &= ~PHPM_GO_LINK_D;
   10190 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10191 	}
   10192 
   10193 	/*
   10194 	 * Let the chip set speed/duplex on its own based on
   10195 	 * signals from the PHY.
   10196 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10197 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10198 	 */
   10199 	sc->sc_ctrl |= CTRL_SLU;
   10200 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10201 
   10202 	/* Initialize our media structures and probe the GMII. */
   10203 	mii->mii_ifp = ifp;
   10204 
   10205 	mii->mii_statchg = wm_gmii_statchg;
   10206 
   10207 	/* get PHY control from SMBus to PCIe */
   10208 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10209 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10210 	    || (sc->sc_type == WM_T_PCH_CNP))
   10211 		wm_init_phy_workarounds_pchlan(sc);
   10212 
   10213 	wm_gmii_reset(sc);
   10214 
   10215 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10216 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10217 	    wm_gmii_mediastatus);
   10218 
   10219 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10220 	    || (sc->sc_type == WM_T_82580)
   10221 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10222 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10223 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10224 			/* Attach only one port */
   10225 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10226 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10227 		} else {
   10228 			int i, id;
   10229 			uint32_t ctrl_ext;
   10230 
   10231 			id = wm_get_phy_id_82575(sc);
   10232 			if (id != -1) {
   10233 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10234 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10235 			}
   10236 			if ((id == -1)
   10237 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10238 				/* Power on the SGMII PHY if it is disabled */
   10239 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10240 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10241 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10242 				CSR_WRITE_FLUSH(sc);
   10243 				delay(300*1000); /* XXX too long */
   10244 
    10245 				/* Try PHY addresses 1 through 7 */
   10246 				for (i = 1; i < 8; i++)
   10247 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10248 					    0xffffffff, i, MII_OFFSET_ANY,
   10249 					    MIIF_DOPAUSE);
   10250 
    10251 				/* Restore the previous SFP cage power state */
   10252 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10253 			}
   10254 		}
   10255 	} else
   10256 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10257 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10258 
    10259 	/*
    10260 	 * If the MAC is PCH2 or newer and no MII PHY was detected, apply
    10261 	 * the wm_set_mdio_slow_mode_hv() workaround and retry.
    10262 	 */
   10263 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10264 		|| (sc->sc_type == WM_T_PCH_SPT)
   10265 		|| (sc->sc_type == WM_T_PCH_CNP))
   10266 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10267 		wm_set_mdio_slow_mode_hv(sc);
   10268 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10269 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10270 	}
   10271 
   10272 	/*
   10273 	 * (For ICH8 variants)
   10274 	 * If PHY detection failed, use BM's r/w function and retry.
   10275 	 */
   10276 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10277 		/* if failed, retry with *_bm_* */
   10278 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10279 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10280 		    sc->sc_phytype);
   10281 		sc->sc_phytype = WMPHY_BM;
   10282 		mii->mii_readreg = wm_gmii_bm_readreg;
   10283 		mii->mii_writereg = wm_gmii_bm_writereg;
   10284 
   10285 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10286 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10287 	}
   10288 
   10289 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10290 		/* No PHY was found */
   10291 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10292 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10293 		sc->sc_phytype = WMPHY_NONE;
   10294 	} else {
   10295 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10296 
    10297 		/*
    10298 		 * PHY found! Check the PHY type again with the second call
    10299 		 * of wm_gmii_setup_phytype().
    10300 		 */
   10301 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10302 		    child->mii_mpd_model);
   10303 
   10304 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10305 	}
   10306 }
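
/*
 * Probe fallback chain used above, for reference: a regular mii_attach()
 * first (with the PHY address narrowed down on SGMII devices), then, on
 * PCH2 and newer, a retry in slow MDIO mode, then a retry with the BM
 * read/write functions, and finally IFM_NONE if no PHY ever answers.
 */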
   10307 
   10308 /*
   10309  * wm_gmii_mediachange:	[ifmedia interface function]
   10310  *
   10311  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10312  */
   10313 static int
   10314 wm_gmii_mediachange(struct ifnet *ifp)
   10315 {
   10316 	struct wm_softc *sc = ifp->if_softc;
   10317 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10318 	int rc;
   10319 
   10320 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10321 		device_xname(sc->sc_dev), __func__));
   10322 	if ((ifp->if_flags & IFF_UP) == 0)
   10323 		return 0;
   10324 
   10325 	/* Disable D0 LPLU. */
   10326 	wm_lplu_d0_disable(sc);
   10327 
   10328 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10329 	sc->sc_ctrl |= CTRL_SLU;
   10330 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10331 	    || (sc->sc_type > WM_T_82543)) {
   10332 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10333 	} else {
   10334 		sc->sc_ctrl &= ~CTRL_ASDE;
   10335 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10336 		if (ife->ifm_media & IFM_FDX)
   10337 			sc->sc_ctrl |= CTRL_FD;
   10338 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10339 		case IFM_10_T:
   10340 			sc->sc_ctrl |= CTRL_SPEED_10;
   10341 			break;
   10342 		case IFM_100_TX:
   10343 			sc->sc_ctrl |= CTRL_SPEED_100;
   10344 			break;
   10345 		case IFM_1000_T:
   10346 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10347 			break;
   10348 		case IFM_NONE:
   10349 			/* There is no specific setting for IFM_NONE */
   10350 			break;
   10351 		default:
   10352 			panic("wm_gmii_mediachange: bad media 0x%x",
   10353 			    ife->ifm_media);
   10354 		}
   10355 	}
   10356 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10357 	CSR_WRITE_FLUSH(sc);
   10358 	if (sc->sc_type <= WM_T_82543)
   10359 		wm_gmii_reset(sc);
   10360 
   10361 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10362 		return 0;
   10363 	return rc;
   10364 }
   10365 
   10366 /*
   10367  * wm_gmii_mediastatus:	[ifmedia interface function]
   10368  *
   10369  *	Get the current interface media status on a 1000BASE-T device.
   10370  */
   10371 static void
   10372 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10373 {
   10374 	struct wm_softc *sc = ifp->if_softc;
   10375 
   10376 	ether_mediastatus(ifp, ifmr);
   10377 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10378 	    | sc->sc_flowflags;
   10379 }
   10380 
   10381 #define	MDI_IO		CTRL_SWDPIN(2)
   10382 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10383 #define	MDI_CLK		CTRL_SWDPIN(3)
   10384 
   10385 static void
   10386 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10387 {
   10388 	uint32_t i, v;
   10389 
   10390 	v = CSR_READ(sc, WMREG_CTRL);
   10391 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10392 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10393 
   10394 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10395 		if (data & i)
   10396 			v |= MDI_IO;
   10397 		else
   10398 			v &= ~MDI_IO;
   10399 		CSR_WRITE(sc, WMREG_CTRL, v);
   10400 		CSR_WRITE_FLUSH(sc);
   10401 		delay(10);
   10402 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10403 		CSR_WRITE_FLUSH(sc);
   10404 		delay(10);
   10405 		CSR_WRITE(sc, WMREG_CTRL, v);
   10406 		CSR_WRITE_FLUSH(sc);
   10407 		delay(10);
   10408 	}
   10409 }
   10410 
   10411 static uint16_t
   10412 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10413 {
   10414 	uint32_t v, i;
   10415 	uint16_t data = 0;
   10416 
   10417 	v = CSR_READ(sc, WMREG_CTRL);
   10418 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10419 	v |= CTRL_SWDPIO(3);
   10420 
   10421 	CSR_WRITE(sc, WMREG_CTRL, v);
   10422 	CSR_WRITE_FLUSH(sc);
   10423 	delay(10);
   10424 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10425 	CSR_WRITE_FLUSH(sc);
   10426 	delay(10);
   10427 	CSR_WRITE(sc, WMREG_CTRL, v);
   10428 	CSR_WRITE_FLUSH(sc);
   10429 	delay(10);
   10430 
   10431 	for (i = 0; i < 16; i++) {
   10432 		data <<= 1;
   10433 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10434 		CSR_WRITE_FLUSH(sc);
   10435 		delay(10);
   10436 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10437 			data |= 1;
   10438 		CSR_WRITE(sc, WMREG_CTRL, v);
   10439 		CSR_WRITE_FLUSH(sc);
   10440 		delay(10);
   10441 	}
   10442 
   10443 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10444 	CSR_WRITE_FLUSH(sc);
   10445 	delay(10);
   10446 	CSR_WRITE(sc, WMREG_CTRL, v);
   10447 	CSR_WRITE_FLUSH(sc);
   10448 	delay(10);
   10449 
   10450 	return data;
   10451 }
   10452 
   10453 #undef MDI_IO
   10454 #undef MDI_DIR
   10455 #undef MDI_CLK
   10456 
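/*
 * The two helpers above bit-bang IEEE 802.3 clause 22 MDIO frames over
 * the software-definable pins. A read consists of a 32-bit all-ones
 * preamble and the 14 leading frame bits (start, read opcode, PHY and
 * register address) sent with wm_i82543_mii_sendbits(), followed by a
 * turnaround and 16 data bits clocked back in by wm_i82543_mii_recvbits();
 * a write sends the entire 32-bit frame, data included, in a single
 * wm_i82543_mii_sendbits() call.
 */
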
   10457 /*
   10458  * wm_gmii_i82543_readreg:	[mii interface function]
   10459  *
   10460  *	Read a PHY register on the GMII (i82543 version).
   10461  */
   10462 static int
   10463 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10464 {
   10465 	struct wm_softc *sc = device_private(dev);
   10466 
   10467 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10468 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10469 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10470 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10471 
   10472 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10473 		device_xname(dev), phy, reg, *val));
   10474 
   10475 	return 0;
   10476 }
   10477 
   10478 /*
   10479  * wm_gmii_i82543_writereg:	[mii interface function]
   10480  *
   10481  *	Write a PHY register on the GMII (i82543 version).
   10482  */
   10483 static int
   10484 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10485 {
   10486 	struct wm_softc *sc = device_private(dev);
   10487 
   10488 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10489 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10490 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10491 	    (MII_COMMAND_START << 30), 32);
   10492 
   10493 	return 0;
   10494 }
   10495 
   10496 /*
   10497  * wm_gmii_mdic_readreg:	[mii interface function]
   10498  *
   10499  *	Read a PHY register on the GMII.
   10500  */
   10501 static int
   10502 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10503 {
   10504 	struct wm_softc *sc = device_private(dev);
   10505 	uint32_t mdic = 0;
   10506 	int i;
   10507 
   10508 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10509 	    && (reg > MII_ADDRMASK)) {
   10510 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10511 		    __func__, sc->sc_phytype, reg);
   10512 		reg &= MII_ADDRMASK;
   10513 	}
   10514 
   10515 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10516 	    MDIC_REGADD(reg));
   10517 
   10518 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10519 		delay(50);
   10520 		mdic = CSR_READ(sc, WMREG_MDIC);
   10521 		if (mdic & MDIC_READY)
   10522 			break;
   10523 	}
   10524 
   10525 	if ((mdic & MDIC_READY) == 0) {
   10526 		DPRINTF(WM_DEBUG_GMII,
   10527 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10528 			device_xname(dev), phy, reg));
   10529 		return ETIMEDOUT;
   10530 	} else if (mdic & MDIC_E) {
   10531 		/* This is normal if no PHY is present. */
   10532 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10533 			device_xname(sc->sc_dev), phy, reg));
   10534 		return -1;
   10535 	} else
   10536 		*val = MDIC_DATA(mdic);
   10537 
   10538 	/*
   10539 	 * Allow some time after each MDIC transaction to avoid
   10540 	 * reading duplicate data in the next MDIC transaction.
   10541 	 */
   10542 	if (sc->sc_type == WM_T_PCH2)
   10543 		delay(100);
   10544 
   10545 	return 0;
   10546 }
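
/*
 * A note on the polling above: an MDIC transaction is started with a
 * single register write, after which MDIC_READY is polled in 50us steps,
 * so the worst case is WM_GEN_POLL_TIMEOUT * 3 * 50us before ETIMEDOUT
 * is returned.
 */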
   10547 
   10548 /*
   10549  * wm_gmii_mdic_writereg:	[mii interface function]
   10550  *
   10551  *	Write a PHY register on the GMII.
   10552  */
   10553 static int
   10554 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10555 {
   10556 	struct wm_softc *sc = device_private(dev);
   10557 	uint32_t mdic = 0;
   10558 	int i;
   10559 
   10560 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10561 	    && (reg > MII_ADDRMASK)) {
   10562 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10563 		    __func__, sc->sc_phytype, reg);
   10564 		reg &= MII_ADDRMASK;
   10565 	}
   10566 
   10567 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10568 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10569 
   10570 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10571 		delay(50);
   10572 		mdic = CSR_READ(sc, WMREG_MDIC);
   10573 		if (mdic & MDIC_READY)
   10574 			break;
   10575 	}
   10576 
   10577 	if ((mdic & MDIC_READY) == 0) {
   10578 		DPRINTF(WM_DEBUG_GMII,
   10579 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10580 			device_xname(dev), phy, reg));
   10581 		return ETIMEDOUT;
   10582 	} else if (mdic & MDIC_E) {
   10583 		DPRINTF(WM_DEBUG_GMII,
   10584 		    ("%s: MDIC write error: phy %d reg %d\n",
   10585 			device_xname(dev), phy, reg));
   10586 		return -1;
   10587 	}
   10588 
   10589 	/*
   10590 	 * Allow some time after each MDIC transaction to avoid
   10591 	 * reading duplicate data in the next MDIC transaction.
   10592 	 */
   10593 	if (sc->sc_type == WM_T_PCH2)
   10594 		delay(100);
   10595 
   10596 	return 0;
   10597 }
   10598 
   10599 /*
   10600  * wm_gmii_i82544_readreg:	[mii interface function]
   10601  *
   10602  *	Read a PHY register on the GMII.
   10603  */
   10604 static int
   10605 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10606 {
   10607 	struct wm_softc *sc = device_private(dev);
   10608 	int rv;
   10609 
   10610 	if (sc->phy.acquire(sc)) {
   10611 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10612 		return -1;
   10613 	}
   10614 
   10615 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10616 
   10617 	sc->phy.release(sc);
   10618 
   10619 	return rv;
   10620 }
   10621 
   10622 static int
   10623 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10624 {
   10625 	struct wm_softc *sc = device_private(dev);
   10626 	int rv;
   10627 
   10628 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10629 		switch (sc->sc_phytype) {
   10630 		case WMPHY_IGP:
   10631 		case WMPHY_IGP_2:
   10632 		case WMPHY_IGP_3:
   10633 			rv = wm_gmii_mdic_writereg(dev, phy,
   10634 			    MII_IGPHY_PAGE_SELECT, reg);
   10635 			if (rv != 0)
   10636 				return rv;
   10637 			break;
   10638 		default:
   10639 #ifdef WM_DEBUG
   10640 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10641 			    __func__, sc->sc_phytype, reg);
   10642 #endif
   10643 			break;
   10644 		}
   10645 	}
   10646 
   10647 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10648 }
   10649 
   10650 /*
   10651  * wm_gmii_i82544_writereg:	[mii interface function]
   10652  *
   10653  *	Write a PHY register on the GMII.
   10654  */
   10655 static int
   10656 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10657 {
   10658 	struct wm_softc *sc = device_private(dev);
   10659 	int rv;
   10660 
   10661 	if (sc->phy.acquire(sc)) {
   10662 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10663 		return -1;
   10664 	}
   10665 
   10666 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10667 	sc->phy.release(sc);
   10668 
   10669 	return rv;
   10670 }
   10671 
   10672 static int
   10673 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10674 {
   10675 	struct wm_softc *sc = device_private(dev);
   10676 	int rv;
   10677 
   10678 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10679 		switch (sc->sc_phytype) {
   10680 		case WMPHY_IGP:
   10681 		case WMPHY_IGP_2:
   10682 		case WMPHY_IGP_3:
   10683 			rv = wm_gmii_mdic_writereg(dev, phy,
   10684 			    MII_IGPHY_PAGE_SELECT, reg);
   10685 			if (rv != 0)
   10686 				return rv;
   10687 			break;
   10688 		default:
   10689 #ifdef WM_DEBUG
   10690 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10691 			    __func__, sc->sc_phytype, reg);
   10692 #endif
   10693 			break;
   10694 		}
   10695 	}
   10696 
   10697 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10698 }
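
/*
 * For the IGP PHY family, registers above 0x1f are reached by first
 * writing the full register offset to MII_IGPHY_PAGE_SELECT and then
 * accessing (reg & MII_ADDRMASK) on the selected page, which is exactly
 * what the two _locked helpers above do before delegating to the MDIC
 * functions.
 */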
   10699 
    10700 /*
    10701  * wm_gmii_i80003_readreg:	[mii interface function]
    10702  *
    10703  *	Read a PHY register on the Kumeran interface (80003).
    10704  * This could be handled by the PHY layer if we didn't have to lock the
    10705  * resource ...
    10706  */
   10707 static int
   10708 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10709 {
   10710 	struct wm_softc *sc = device_private(dev);
   10711 	int page_select;
   10712 	uint16_t temp, temp2;
   10713 	int rv = 0;
   10714 
    10715 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10716 		return -1;
   10717 
   10718 	if (sc->phy.acquire(sc)) {
   10719 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10720 		return -1;
   10721 	}
   10722 
   10723 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10724 		page_select = GG82563_PHY_PAGE_SELECT;
   10725 	else {
   10726 		/*
   10727 		 * Use Alternative Page Select register to access registers
   10728 		 * 30 and 31.
   10729 		 */
   10730 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10731 	}
   10732 	temp = reg >> GG82563_PAGE_SHIFT;
   10733 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10734 		goto out;
   10735 
   10736 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10737 		/*
    10738 		 * Wait another 200us to work around a bug in the ready bit
    10739 		 * of the MDIC register.
    10740 		 */
   10741 		delay(200);
   10742 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10743 		if ((rv != 0) || (temp2 != temp)) {
   10744 			device_printf(dev, "%s failed\n", __func__);
   10745 			rv = -1;
   10746 			goto out;
   10747 		}
   10748 		delay(200);
   10749 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10750 		delay(200);
   10751 	} else
   10752 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10753 
   10754 out:
   10755 	sc->phy.release(sc);
   10756 	return rv;
   10757 }
   10758 
    10759 /*
    10760  * wm_gmii_i80003_writereg:	[mii interface function]
    10761  *
    10762  *	Write a PHY register on the Kumeran interface (80003).
    10763  * This could be handled by the PHY layer if we didn't have to lock the
    10764  * resource ...
    10765  */
   10766 static int
   10767 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10768 {
   10769 	struct wm_softc *sc = device_private(dev);
   10770 	int page_select, rv;
   10771 	uint16_t temp, temp2;
   10772 
    10773 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10774 		return -1;
   10775 
   10776 	if (sc->phy.acquire(sc)) {
   10777 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10778 		return -1;
   10779 	}
   10780 
   10781 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10782 		page_select = GG82563_PHY_PAGE_SELECT;
   10783 	else {
   10784 		/*
   10785 		 * Use Alternative Page Select register to access registers
   10786 		 * 30 and 31.
   10787 		 */
   10788 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10789 	}
   10790 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10791 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10792 		goto out;
   10793 
   10794 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10795 		/*
    10796 		 * Wait another 200us to work around a bug in the ready bit
    10797 		 * of the MDIC register.
    10798 		 */
   10799 		delay(200);
   10800 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10801 		if ((rv != 0) || (temp2 != temp)) {
   10802 			device_printf(dev, "%s failed\n", __func__);
   10803 			rv = -1;
   10804 			goto out;
   10805 		}
   10806 		delay(200);
   10807 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10808 		delay(200);
   10809 	} else
   10810 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10811 
   10812 out:
   10813 	sc->phy.release(sc);
   10814 	return rv;
   10815 }
   10816 
    10817 /*
    10818  * wm_gmii_bm_readreg:	[mii interface function]
    10819  *
    10820  *	Read a PHY register on the BM PHYs (82567 family).
    10821  * This could be handled by the PHY layer if we didn't have to lock the
    10822  * resource ...
    10823  */
   10824 static int
   10825 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10826 {
   10827 	struct wm_softc *sc = device_private(dev);
   10828 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10829 	int rv;
   10830 
   10831 	if (sc->phy.acquire(sc)) {
   10832 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10833 		return -1;
   10834 	}
   10835 
   10836 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10837 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10838 		    || (reg == 31)) ? 1 : phy;
   10839 	/* Page 800 works differently than the rest so it has its own func */
   10840 	if (page == BM_WUC_PAGE) {
   10841 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10842 		goto release;
   10843 	}
   10844 
   10845 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10846 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10847 		    && (sc->sc_type != WM_T_82583))
   10848 			rv = wm_gmii_mdic_writereg(dev, phy,
   10849 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10850 		else
   10851 			rv = wm_gmii_mdic_writereg(dev, phy,
   10852 			    BME1000_PHY_PAGE_SELECT, page);
   10853 		if (rv != 0)
   10854 			goto release;
   10855 	}
   10856 
   10857 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10858 
   10859 release:
   10860 	sc->phy.release(sc);
   10861 	return rv;
   10862 }
   10863 
    10864 /*
    10865  * wm_gmii_bm_writereg:	[mii interface function]
    10866  *
    10867  *	Write a PHY register on the BM PHYs (82567 family).
    10868  * This could be handled by the PHY layer if we didn't have to lock the
    10869  * resource ...
    10870  */
   10871 static int
   10872 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10873 {
   10874 	struct wm_softc *sc = device_private(dev);
   10875 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10876 	int rv;
   10877 
   10878 	if (sc->phy.acquire(sc)) {
   10879 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10880 		return -1;
   10881 	}
   10882 
   10883 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10884 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10885 		    || (reg == 31)) ? 1 : phy;
   10886 	/* Page 800 works differently than the rest so it has its own func */
   10887 	if (page == BM_WUC_PAGE) {
   10888 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10889 		goto release;
   10890 	}
   10891 
   10892 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10893 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10894 		    && (sc->sc_type != WM_T_82583))
   10895 			rv = wm_gmii_mdic_writereg(dev, phy,
   10896 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10897 		else
   10898 			rv = wm_gmii_mdic_writereg(dev, phy,
   10899 			    BME1000_PHY_PAGE_SELECT, page);
   10900 		if (rv != 0)
   10901 			goto release;
   10902 	}
   10903 
   10904 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10905 
   10906 release:
   10907 	sc->phy.release(sc);
   10908 	return rv;
   10909 }
   10910 
   10911 /*
   10912  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10913  *  @dev: pointer to the HW structure
   10914  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10915  *
   10916  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10917  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10918  */
   10919 static int
   10920 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10921 {
   10922 	uint16_t temp;
   10923 	int rv;
   10924 
   10925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10926 		device_xname(dev), __func__));
   10927 
   10928 	if (!phy_regp)
   10929 		return -1;
   10930 
   10931 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10932 
   10933 	/* Select Port Control Registers page */
   10934 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10935 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10936 	if (rv != 0)
   10937 		return rv;
   10938 
   10939 	/* Read WUCE and save it */
   10940 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10941 	if (rv != 0)
   10942 		return rv;
   10943 
   10944 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10945 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10946 	 */
   10947 	temp = *phy_regp;
   10948 	temp |= BM_WUC_ENABLE_BIT;
   10949 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10950 
   10951 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10952 		return rv;
   10953 
    10954 	/* Select the Host Wakeup Registers page - the caller is now
    10955 	 * able to write registers on the Wakeup registers page.
    10956 	 */
   10957 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10958 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10959 }
   10960 
   10961 /*
   10962  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10963  *  @dev: pointer to the HW structure
   10964  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10965  *
   10966  *  Restore BM_WUC_ENABLE_REG to its original value.
   10967  *
   10968  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10969  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10970  *  caller.
   10971  */
   10972 static int
   10973 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10974 {
   10975 
   10976 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10977 		device_xname(dev), __func__));
   10978 
   10979 	if (!phy_regp)
   10980 		return -1;
   10981 
   10982 	/* Select Port Control Registers page */
   10983 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10984 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10985 
   10986 	/* Restore 769.17 to its original value */
   10987 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10988 
   10989 	return 0;
   10990 }
   10991 
   10992 /*
   10993  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    10994  *  @dev: pointer to the device structure
   10995  *  @offset: register offset to be read or written
   10996  *  @val: pointer to the data to read or write
   10997  *  @rd: determines if operation is read or write
   10998  *  @page_set: BM_WUC_PAGE already set and access enabled
   10999  *
   11000  *  Read the PHY register at offset and store the retrieved information in
   11001  *  data, or write data to PHY register at offset.  Note the procedure to
   11002  *  access the PHY wakeup registers is different than reading the other PHY
   11003  *  registers. It works as such:
   11004  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11005  *  2) Set page to 800 for host (801 if we were manageability)
   11006  *  3) Write the address using the address opcode (0x11)
   11007  *  4) Read or write the data using the data opcode (0x12)
   11008  *  5) Restore 769.17.2 to its original value
   11009  *
   11010  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11011  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11012  *
   11013  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11014  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11015  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11016  */
    11017 static int
    11018 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
    11019 	bool page_set)
   11020 {
   11021 	struct wm_softc *sc = device_private(dev);
   11022 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11023 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11024 	uint16_t wuce;
   11025 	int rv = 0;
   11026 
   11027 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11028 		device_xname(dev), __func__));
   11029 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11030 	if ((sc->sc_type == WM_T_PCH)
   11031 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11032 		device_printf(dev,
   11033 		    "Attempting to access page %d while gig enabled.\n", page);
   11034 	}
   11035 
   11036 	if (!page_set) {
   11037 		/* Enable access to PHY wakeup registers */
   11038 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11039 		if (rv != 0) {
   11040 			device_printf(dev,
   11041 			    "%s: Could not enable PHY wakeup reg access\n",
   11042 			    __func__);
   11043 			return rv;
   11044 		}
   11045 	}
   11046 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11047 		device_xname(sc->sc_dev), __func__, page, regnum));
   11048 
    11049 	/*
    11050 	 * Steps 3 and 4 of the procedure documented above: write the
    11051 	 * register address, then read or write the data.
    11052 	 */
   11053 
   11054 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11055 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11056 	if (rv != 0)
   11057 		return rv;
   11058 
   11059 	if (rd) {
   11060 		/* Read the Wakeup register page value using opcode 0x12 */
   11061 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11062 	} else {
   11063 		/* Write the Wakeup register page value using opcode 0x12 */
   11064 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11065 	}
   11066 	if (rv != 0)
   11067 		return rv;
   11068 
   11069 	if (!page_set)
   11070 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11071 
   11072 	return rv;
   11073 }
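
/*
 * A minimal usage sketch, assuming the PHY semaphore is already held and
 * page_set is false so that the helper brackets the access itself. Here
 * "offset" stands for a pre-encoded page/register value like the reg
 * argument the callers in this file pass in (decoded internally with
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM()):
 *
 *	uint16_t data;
 *	int error = wm_access_phy_wakeup_reg_bm(dev, offset, &data,
 *	    true, false);	(read)
 *	error = wm_access_phy_wakeup_reg_bm(dev, offset, &data,
 *	    false, false);	(write)
 */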
   11074 
    11075 /*
    11076  * wm_gmii_hv_readreg:	[mii interface function]
    11077  *
    11078  *	Read a PHY register on the HV (PCH and newer) PHYs.
    11079  * This could be handled by the PHY layer if we didn't have to lock the
    11080  * resource ...
    11081  */
   11082 static int
   11083 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11084 {
   11085 	struct wm_softc *sc = device_private(dev);
   11086 	int rv;
   11087 
   11088 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11089 		device_xname(dev), __func__));
   11090 	if (sc->phy.acquire(sc)) {
   11091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11092 		return -1;
   11093 	}
   11094 
   11095 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11096 	sc->phy.release(sc);
   11097 	return rv;
   11098 }
   11099 
   11100 static int
   11101 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11102 {
   11103 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11104 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11105 	int rv;
   11106 
   11107 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11108 
   11109 	/* Page 800 works differently than the rest so it has its own func */
   11110 	if (page == BM_WUC_PAGE)
   11111 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11112 
    11113 	/*
    11114 	 * Pages below 768 (other than page 0) work differently than the
    11115 	 * rest and are not handled here.
    11116 	 */
    11117 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    11118 		device_printf(dev, "%s: page %d unsupported\n", __func__, page);
    11119 		return -1;
    11120 	}
   11121 
   11122 	/*
   11123 	 * XXX I21[789] documents say that the SMBus Address register is at
   11124 	 * PHY address 01, Page 0 (not 768), Register 26.
   11125 	 */
   11126 	if (page == HV_INTC_FC_PAGE_START)
   11127 		page = 0;
   11128 
   11129 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11130 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11131 		    page << BME1000_PAGE_SHIFT);
   11132 		if (rv != 0)
   11133 			return rv;
   11134 	}
   11135 
   11136 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11137 }
   11138 
    11139 /*
    11140  * wm_gmii_hv_writereg:	[mii interface function]
    11141  *
    11142  *	Write a PHY register on the HV (PCH and newer) PHYs.
    11143  * This could be handled by the PHY layer if we didn't have to lock the
    11144  * resource ...
    11145  */
   11146 static int
   11147 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11148 {
   11149 	struct wm_softc *sc = device_private(dev);
   11150 	int rv;
   11151 
   11152 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11153 		device_xname(dev), __func__));
   11154 
   11155 	if (sc->phy.acquire(sc)) {
   11156 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11157 		return -1;
   11158 	}
   11159 
   11160 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11161 	sc->phy.release(sc);
   11162 
   11163 	return rv;
   11164 }
   11165 
   11166 static int
   11167 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11168 {
   11169 	struct wm_softc *sc = device_private(dev);
   11170 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11171 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11172 	int rv;
   11173 
   11174 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11175 
   11176 	/* Page 800 works differently than the rest so it has its own func */
   11177 	if (page == BM_WUC_PAGE)
   11178 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11179 		    false);
   11180 
   11181 	/*
   11182 	 * Lower than page 768 works differently than the rest so it has its
   11183 	 * own func
   11184 	 */
   11185 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11186 		printf("gmii_hv_writereg!!!\n");
   11187 		return -1;
   11188 	}
   11189 
   11190 	{
   11191 		/*
   11192 		 * XXX I21[789] documents say that the SMBus Address register
   11193 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11194 		 */
   11195 		if (page == HV_INTC_FC_PAGE_START)
   11196 			page = 0;
   11197 
   11198 		/*
   11199 		 * XXX Workaround MDIO accesses being disabled after entering
   11200 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11201 		 * register is set)
   11202 		 */
   11203 		if (sc->sc_phytype == WMPHY_82578) {
   11204 			struct mii_softc *child;
   11205 
   11206 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11207 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11208 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11209 			    && ((val & (1 << 11)) != 0)) {
   11210 				printf("XXX need workaround\n");
   11211 			}
   11212 		}
   11213 
   11214 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11215 			rv = wm_gmii_mdic_writereg(dev, 1,
   11216 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11217 			if (rv != 0)
   11218 				return rv;
   11219 		}
   11220 	}
   11221 
   11222 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11223 }
   11224 
   11225 /*
   11226  * wm_gmii_82580_readreg:	[mii interface function]
   11227  *
   11228  *	Read a PHY register on the 82580 and I350.
   11229  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11231  */
   11232 static int
   11233 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11234 {
   11235 	struct wm_softc *sc = device_private(dev);
   11236 	int rv;
   11237 
   11238 	if (sc->phy.acquire(sc) != 0) {
   11239 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11240 		return -1;
   11241 	}
   11242 
   11243 #ifdef DIAGNOSTIC
   11244 	if (reg > MII_ADDRMASK) {
   11245 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11246 		    __func__, sc->sc_phytype, reg);
   11247 		reg &= MII_ADDRMASK;
   11248 	}
   11249 #endif
   11250 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11251 
   11252 	sc->phy.release(sc);
   11253 	return rv;
   11254 }
   11255 
   11256 /*
   11257  * wm_gmii_82580_writereg:	[mii interface function]
   11258  *
   11259  *	Write a PHY register on the 82580 and I350.
   11260  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11262  */
   11263 static int
   11264 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11265 {
   11266 	struct wm_softc *sc = device_private(dev);
   11267 	int rv;
   11268 
   11269 	if (sc->phy.acquire(sc) != 0) {
   11270 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11271 		return -1;
   11272 	}
   11273 
   11274 #ifdef DIAGNOSTIC
   11275 	if (reg > MII_ADDRMASK) {
   11276 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11277 		    __func__, sc->sc_phytype, reg);
   11278 		reg &= MII_ADDRMASK;
   11279 	}
   11280 #endif
   11281 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11282 
   11283 	sc->phy.release(sc);
   11284 	return rv;
   11285 }
   11286 
   11287 /*
   11288  * wm_gmii_gs40g_readreg:	[mii interface function]
   11289  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11293  */
   11294 static int
   11295 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11296 {
   11297 	struct wm_softc *sc = device_private(dev);
   11298 	int page, offset;
   11299 	int rv;
   11300 
   11301 	/* Acquire semaphore */
   11302 	if (sc->phy.acquire(sc)) {
   11303 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11304 		return -1;
   11305 	}
   11306 
   11307 	/* Page select */
   11308 	page = reg >> GS40G_PAGE_SHIFT;
   11309 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11310 	if (rv != 0)
   11311 		goto release;
   11312 
   11313 	/* Read reg */
   11314 	offset = reg & GS40G_OFFSET_MASK;
   11315 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11316 
   11317 release:
   11318 	sc->phy.release(sc);
   11319 	return rv;
   11320 }
   11321 
   11322 /*
   11323  * wm_gmii_gs40g_writereg:	[mii interface function]
   11324  *
   11325  *	Write a PHY register on the I210 and I211.
   11326  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11328  */
   11329 static int
   11330 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11331 {
   11332 	struct wm_softc *sc = device_private(dev);
   11333 	uint16_t page;
   11334 	int offset, rv;
   11335 
   11336 	/* Acquire semaphore */
   11337 	if (sc->phy.acquire(sc)) {
   11338 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11339 		return -1;
   11340 	}
   11341 
   11342 	/* Page select */
   11343 	page = reg >> GS40G_PAGE_SHIFT;
   11344 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11345 	if (rv != 0)
   11346 		goto release;
   11347 
   11348 	/* Write reg */
   11349 	offset = reg & GS40G_OFFSET_MASK;
   11350 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11351 
   11352 release:
   11353 	/* Release semaphore */
   11354 	sc->phy.release(sc);
   11355 	return rv;
   11356 }
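
/*
 * Illustrative sketch of the GS40G (I210/I211) register encoding,
 * assuming callers build "reg" as (page << GS40G_PAGE_SHIFT) | offset,
 * which is what the split above undoes:
 *
 *	int reg = (page << GS40G_PAGE_SHIFT) | (offset & GS40G_OFFSET_MASK);
 *	wm_gmii_gs40g_writereg(dev, phy, reg, val);
 */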
   11357 
   11358 /*
   11359  * wm_gmii_statchg:	[mii interface function]
   11360  *
   11361  *	Callback from MII layer when media changes.
   11362  */
   11363 static void
   11364 wm_gmii_statchg(struct ifnet *ifp)
   11365 {
   11366 	struct wm_softc *sc = ifp->if_softc;
   11367 	struct mii_data *mii = &sc->sc_mii;
   11368 
   11369 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11370 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11371 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11372 
   11373 	/* Get flow control negotiation result. */
   11374 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11375 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11376 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11377 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11378 	}
   11379 
   11380 	if (sc->sc_flowflags & IFM_FLOW) {
   11381 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11382 			sc->sc_ctrl |= CTRL_TFCE;
   11383 			sc->sc_fcrtl |= FCRTL_XONE;
   11384 		}
   11385 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11386 			sc->sc_ctrl |= CTRL_RFCE;
   11387 	}
   11388 
   11389 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11390 		DPRINTF(WM_DEBUG_LINK,
   11391 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11392 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11393 	} else {
   11394 		DPRINTF(WM_DEBUG_LINK,
   11395 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11396 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11397 	}
   11398 
   11399 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11400 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11401 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11402 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11403 	if (sc->sc_type == WM_T_80003) {
   11404 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11405 		case IFM_1000_T:
   11406 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11407 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11409 			break;
   11410 		default:
   11411 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11412 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11414 			break;
   11415 		}
   11416 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11417 	}
   11418 }
   11419 
   11420 /* kumeran related (80003, ICH* and PCH*) */
   11421 
   11422 /*
   11423  * wm_kmrn_readreg:
   11424  *
   11425  *	Read a kumeran register
   11426  */
   11427 static int
   11428 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11429 {
   11430 	int rv;
   11431 
   11432 	if (sc->sc_type == WM_T_80003)
   11433 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11434 	else
   11435 		rv = sc->phy.acquire(sc);
   11436 	if (rv != 0) {
   11437 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11438 		    __func__);
   11439 		return rv;
   11440 	}
   11441 
   11442 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11443 
   11444 	if (sc->sc_type == WM_T_80003)
   11445 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11446 	else
   11447 		sc->phy.release(sc);
   11448 
   11449 	return rv;
   11450 }
   11451 
   11452 static int
   11453 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11454 {
   11455 
   11456 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11457 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11458 	    KUMCTRLSTA_REN);
   11459 	CSR_WRITE_FLUSH(sc);
   11460 	delay(2);
   11461 
   11462 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11463 
   11464 	return 0;
   11465 }
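
/*
 * A sketch of the KUMCTRLSTA layout as used above: the register offset
 * is placed in the KUMCTRLSTA_OFFSET field and, for reads, the REN bit
 * is set; after a short delay the data appears in the low 16 bits
 * (KUMCTRLSTA_MASK) of the same register.  For writes (see
 * wm_kmrn_writereg_locked() below) the data simply takes the place of
 * REN.
 */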
   11466 
   11467 /*
   11468  * wm_kmrn_writereg:
   11469  *
   11470  *	Write a kumeran register
   11471  */
   11472 static int
   11473 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11474 {
   11475 	int rv;
   11476 
   11477 	if (sc->sc_type == WM_T_80003)
   11478 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11479 	else
   11480 		rv = sc->phy.acquire(sc);
   11481 	if (rv != 0) {
   11482 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11483 		    __func__);
   11484 		return rv;
   11485 	}
   11486 
   11487 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11488 
   11489 	if (sc->sc_type == WM_T_80003)
   11490 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11491 	else
   11492 		sc->phy.release(sc);
   11493 
   11494 	return rv;
   11495 }
   11496 
   11497 static int
   11498 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11499 {
   11500 
   11501 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11502 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11503 
   11504 	return 0;
   11505 }
   11506 
   11507 /*
   11508  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11509  * This access method is different from IEEE MMD.
   11510  */
   11511 static int
   11512 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11513 {
   11514 	struct wm_softc *sc = device_private(dev);
   11515 	int rv;
   11516 
   11517 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11518 	if (rv != 0)
   11519 		return rv;
   11520 
   11521 	if (rd)
   11522 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11523 	else
   11524 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11525 	return rv;
   11526 }
   11527 
   11528 static int
   11529 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11530 {
   11531 
   11532 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11533 }
   11534 
   11535 static int
   11536 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11537 {
   11538 
   11539 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11540 }
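
/*
 * Illustrative use of the EMI helpers above (a sketch; "reg" and "bit"
 * are placeholders, and the caller must already hold the PHY lock):
 *
 *	uint16_t val;
 *	if (wm_read_emi_reg_locked(dev, reg, &val) == 0)
 *		wm_write_emi_reg_locked(dev, reg, val | bit);
 */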
   11541 
   11542 /* SGMII related */
   11543 
   11544 /*
   11545  * wm_sgmii_uses_mdio
   11546  *
   11547  * Check whether the transaction is to the internal PHY or the external
   11548  * MDIO interface. Return true if it's MDIO.
   11549  */
   11550 static bool
   11551 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11552 {
   11553 	uint32_t reg;
   11554 	bool ismdio = false;
   11555 
   11556 	switch (sc->sc_type) {
   11557 	case WM_T_82575:
   11558 	case WM_T_82576:
   11559 		reg = CSR_READ(sc, WMREG_MDIC);
   11560 		ismdio = ((reg & MDIC_DEST) != 0);
   11561 		break;
   11562 	case WM_T_82580:
   11563 	case WM_T_I350:
   11564 	case WM_T_I354:
   11565 	case WM_T_I210:
   11566 	case WM_T_I211:
   11567 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11568 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11569 		break;
   11570 	default:
   11571 		break;
   11572 	}
   11573 
   11574 	return ismdio;
   11575 }
   11576 
   11577 /*
   11578  * wm_sgmii_readreg:	[mii interface function]
   11579  *
   11580  *	Read a PHY register on the SGMII
   11581  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11583  */
   11584 static int
   11585 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11586 {
   11587 	struct wm_softc *sc = device_private(dev);
   11588 	int rv;
   11589 
   11590 	if (sc->phy.acquire(sc)) {
   11591 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11592 		return -1;
   11593 	}
   11594 
   11595 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11596 
   11597 	sc->phy.release(sc);
   11598 	return rv;
   11599 }
   11600 
   11601 static int
   11602 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11603 {
   11604 	struct wm_softc *sc = device_private(dev);
   11605 	uint32_t i2ccmd;
	int i, rv = 0;
   11607 
   11608 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11609 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11610 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11611 
   11612 	/* Poll the ready bit */
   11613 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11614 		delay(50);
   11615 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11616 		if (i2ccmd & I2CCMD_READY)
   11617 			break;
   11618 	}
   11619 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11620 		device_printf(dev, "I2CCMD Read did not complete\n");
   11621 		rv = ETIMEDOUT;
   11622 	}
   11623 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11624 		device_printf(dev, "I2CCMD Error bit set\n");
   11625 		rv = EIO;
   11626 	}
   11627 
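	/*
	 * The I2CCMD register returns the two data bytes swapped with
	 * respect to host order; e.g. a PHY register value of 0x1234 is
	 * read back as 0x3412, hence the byte swap below.
	 */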
   11628 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11629 
   11630 	return rv;
   11631 }
   11632 
   11633 /*
   11634  * wm_sgmii_writereg:	[mii interface function]
   11635  *
   11636  *	Write a PHY register on the SGMII.
   11637  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11639  */
   11640 static int
   11641 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11642 {
   11643 	struct wm_softc *sc = device_private(dev);
   11644 	int rv;
   11645 
   11646 	if (sc->phy.acquire(sc) != 0) {
   11647 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11648 		return -1;
   11649 	}
   11650 
   11651 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11652 
   11653 	sc->phy.release(sc);
   11654 
   11655 	return rv;
   11656 }
   11657 
   11658 static int
   11659 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11660 {
   11661 	struct wm_softc *sc = device_private(dev);
   11662 	uint32_t i2ccmd;
   11663 	uint16_t swapdata;
   11664 	int rv = 0;
   11665 	int i;
   11666 
   11667 	/* Swap the data bytes for the I2C interface */
   11668 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11669 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11670 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11671 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11672 
   11673 	/* Poll the ready bit */
   11674 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11675 		delay(50);
   11676 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11677 		if (i2ccmd & I2CCMD_READY)
   11678 			break;
   11679 	}
   11680 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11681 		device_printf(dev, "I2CCMD Write did not complete\n");
   11682 		rv = ETIMEDOUT;
   11683 	}
   11684 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11685 		device_printf(dev, "I2CCMD Error bit set\n");
   11686 		rv = EIO;
   11687 	}
   11688 
   11689 	return rv;
   11690 }
   11691 
   11692 /* TBI related */
   11693 
   11694 static bool
   11695 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11696 {
   11697 	bool sig;
   11698 
   11699 	sig = ctrl & CTRL_SWDPIN(1);
   11700 
   11701 	/*
   11702 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11703 	 * detect a signal, 1 if they don't.
   11704 	 */
   11705 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11706 		sig = !sig;
   11707 
   11708 	return sig;
   11709 }
   11710 
   11711 /*
   11712  * wm_tbi_mediainit:
   11713  *
   11714  *	Initialize media for use on 1000BASE-X devices.
   11715  */
   11716 static void
   11717 wm_tbi_mediainit(struct wm_softc *sc)
   11718 {
   11719 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11720 	const char *sep = "";
   11721 
   11722 	if (sc->sc_type < WM_T_82543)
   11723 		sc->sc_tipg = TIPG_WM_DFLT;
   11724 	else
   11725 		sc->sc_tipg = TIPG_LG_DFLT;
   11726 
   11727 	sc->sc_tbi_serdes_anegticks = 5;
   11728 
   11729 	/* Initialize our media structures */
   11730 	sc->sc_mii.mii_ifp = ifp;
   11731 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11732 
   11733 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11734 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11735 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11736 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11737 	else
   11738 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11739 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11740 
   11741 	/*
   11742 	 * SWD Pins:
   11743 	 *
   11744 	 *	0 = Link LED (output)
   11745 	 *	1 = Loss Of Signal (input)
   11746 	 */
   11747 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11748 
   11749 	/* XXX Perhaps this is only for TBI */
   11750 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11751 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11752 
   11753 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11754 		sc->sc_ctrl &= ~CTRL_LRST;
   11755 
   11756 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11757 
   11758 #define	ADD(ss, mm, dd)							\
   11759 do {									\
   11760 	aprint_normal("%s%s", sep, ss);					\
   11761 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11762 	sep = ", ";							\
   11763 } while (/*CONSTCOND*/0)
   11764 
   11765 	aprint_normal_dev(sc->sc_dev, "");
   11766 
   11767 	if (sc->sc_type == WM_T_I354) {
   11768 		uint32_t status;
   11769 
   11770 		status = CSR_READ(sc, WMREG_STATUS);
   11771 		if (((status & STATUS_2P5_SKU) != 0)
   11772 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11773 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11774 		} else
   11775 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11776 	} else if (sc->sc_type == WM_T_82545) {
   11777 		/* Only 82545 is LX (XXX except SFP) */
   11778 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11779 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11780 	} else {
   11781 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11782 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11783 	}
   11784 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11785 	aprint_normal("\n");
   11786 
   11787 #undef ADD
   11788 
   11789 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11790 }
   11791 
   11792 /*
   11793  * wm_tbi_mediachange:	[ifmedia interface function]
   11794  *
   11795  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11796  */
   11797 static int
   11798 wm_tbi_mediachange(struct ifnet *ifp)
   11799 {
   11800 	struct wm_softc *sc = ifp->if_softc;
   11801 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11802 	uint32_t status, ctrl;
   11803 	bool signal;
   11804 	int i;
   11805 
   11806 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11807 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11808 		/* XXX need some work for >= 82571 and < 82575 */
   11809 		if (sc->sc_type < WM_T_82575)
   11810 			return 0;
   11811 	}
   11812 
   11813 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11814 	    || (sc->sc_type >= WM_T_82575))
   11815 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11816 
   11817 	sc->sc_ctrl &= ~CTRL_LRST;
   11818 	sc->sc_txcw = TXCW_ANE;
   11819 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11820 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11821 	else if (ife->ifm_media & IFM_FDX)
   11822 		sc->sc_txcw |= TXCW_FD;
   11823 	else
   11824 		sc->sc_txcw |= TXCW_HD;
   11825 
   11826 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11827 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11828 
   11829 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11830 		device_xname(sc->sc_dev), sc->sc_txcw));
   11831 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11832 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11833 	CSR_WRITE_FLUSH(sc);
   11834 	delay(1000);
   11835 
   11836 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11837 	signal = wm_tbi_havesignal(sc, ctrl);
   11838 
   11839 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11840 		signal));
   11841 
   11842 	if (signal) {
   11843 		/* Have signal; wait for the link to come up. */
   11844 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11845 			delay(10000);
   11846 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11847 				break;
   11848 		}
   11849 
   11850 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11851 			device_xname(sc->sc_dev), i));
   11852 
   11853 		status = CSR_READ(sc, WMREG_STATUS);
   11854 		DPRINTF(WM_DEBUG_LINK,
   11855 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11856 			device_xname(sc->sc_dev), status, STATUS_LU));
   11857 		if (status & STATUS_LU) {
   11858 			/* Link is up. */
   11859 			DPRINTF(WM_DEBUG_LINK,
   11860 			    ("%s: LINK: set media -> link up %s\n",
   11861 				device_xname(sc->sc_dev),
   11862 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11863 
   11864 			/*
   11865 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11866 			 * so we should update sc->sc_ctrl
   11867 			 */
   11868 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11869 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11870 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11871 			if (status & STATUS_FD)
   11872 				sc->sc_tctl |=
   11873 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11874 			else
   11875 				sc->sc_tctl |=
   11876 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11877 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11878 				sc->sc_fcrtl |= FCRTL_XONE;
   11879 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11880 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11881 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11882 			sc->sc_tbi_linkup = 1;
   11883 		} else {
   11884 			if (i == WM_LINKUP_TIMEOUT)
   11885 				wm_check_for_link(sc);
   11886 			/* Link is down. */
   11887 			DPRINTF(WM_DEBUG_LINK,
   11888 			    ("%s: LINK: set media -> link down\n",
   11889 				device_xname(sc->sc_dev)));
   11890 			sc->sc_tbi_linkup = 0;
   11891 		}
   11892 	} else {
   11893 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11894 			device_xname(sc->sc_dev)));
   11895 		sc->sc_tbi_linkup = 0;
   11896 	}
   11897 
   11898 	wm_tbi_serdes_set_linkled(sc);
   11899 
   11900 	return 0;
   11901 }
   11902 
   11903 /*
   11904  * wm_tbi_mediastatus:	[ifmedia interface function]
   11905  *
   11906  *	Get the current interface media status on a 1000BASE-X device.
   11907  */
   11908 static void
   11909 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11910 {
   11911 	struct wm_softc *sc = ifp->if_softc;
   11912 	uint32_t ctrl, status;
   11913 
   11914 	ifmr->ifm_status = IFM_AVALID;
   11915 	ifmr->ifm_active = IFM_ETHER;
   11916 
   11917 	status = CSR_READ(sc, WMREG_STATUS);
   11918 	if ((status & STATUS_LU) == 0) {
   11919 		ifmr->ifm_active |= IFM_NONE;
   11920 		return;
   11921 	}
   11922 
   11923 	ifmr->ifm_status |= IFM_ACTIVE;
   11924 	/* Only 82545 is LX */
   11925 	if (sc->sc_type == WM_T_82545)
   11926 		ifmr->ifm_active |= IFM_1000_LX;
   11927 	else
   11928 		ifmr->ifm_active |= IFM_1000_SX;
   11929 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11930 		ifmr->ifm_active |= IFM_FDX;
   11931 	else
   11932 		ifmr->ifm_active |= IFM_HDX;
   11933 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11934 	if (ctrl & CTRL_RFCE)
   11935 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11936 	if (ctrl & CTRL_TFCE)
   11937 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11938 }
   11939 
   11940 /* XXX TBI only */
   11941 static int
   11942 wm_check_for_link(struct wm_softc *sc)
   11943 {
   11944 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11945 	uint32_t rxcw;
   11946 	uint32_t ctrl;
   11947 	uint32_t status;
   11948 	bool signal;
   11949 
   11950 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11951 		device_xname(sc->sc_dev), __func__));
   11952 
   11953 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11954 		/* XXX need some work for >= 82571 */
   11955 		if (sc->sc_type >= WM_T_82571) {
   11956 			sc->sc_tbi_linkup = 1;
   11957 			return 0;
   11958 		}
   11959 	}
   11960 
   11961 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11962 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11963 	status = CSR_READ(sc, WMREG_STATUS);
   11964 	signal = wm_tbi_havesignal(sc, ctrl);
   11965 
   11966 	DPRINTF(WM_DEBUG_LINK,
   11967 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11968 		device_xname(sc->sc_dev), __func__, signal,
   11969 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11970 
   11971 	/*
   11972 	 * SWDPIN   LU RXCW
   11973 	 *	0    0	  0
   11974 	 *	0    0	  1	(should not happen)
   11975 	 *	0    1	  0	(should not happen)
   11976 	 *	0    1	  1	(should not happen)
   11977 	 *	1    0	  0	Disable autonego and force linkup
   11978 	 *	1    0	  1	got /C/ but not linkup yet
   11979 	 *	1    1	  0	(linkup)
   11980 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11981 	 *
   11982 	 */
   11983 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11984 		DPRINTF(WM_DEBUG_LINK,
   11985 		    ("%s: %s: force linkup and fullduplex\n",
   11986 			device_xname(sc->sc_dev), __func__));
   11987 		sc->sc_tbi_linkup = 0;
   11988 		/* Disable auto-negotiation in the TXCW register */
   11989 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11990 
   11991 		/*
   11992 		 * Force link-up and also force full-duplex.
   11993 		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
   11996 		 */
   11997 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11998 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11999 	} else if (((status & STATUS_LU) != 0)
   12000 	    && ((rxcw & RXCW_C) != 0)
   12001 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12002 		sc->sc_tbi_linkup = 1;
   12003 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12004 			device_xname(sc->sc_dev),
   12005 			__func__));
   12006 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12007 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12008 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12009 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12010 			device_xname(sc->sc_dev), __func__));
   12011 	} else {
   12012 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12013 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12014 			status));
   12015 	}
   12016 
   12017 	return 0;
   12018 }
   12019 
   12020 /*
   12021  * wm_tbi_tick:
   12022  *
   12023  *	Check the link on TBI devices.
   12024  *	This function acts as mii_tick().
   12025  */
   12026 static void
   12027 wm_tbi_tick(struct wm_softc *sc)
   12028 {
   12029 	struct mii_data *mii = &sc->sc_mii;
   12030 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12031 	uint32_t status;
   12032 
   12033 	KASSERT(WM_CORE_LOCKED(sc));
   12034 
   12035 	status = CSR_READ(sc, WMREG_STATUS);
   12036 
   12037 	/* XXX is this needed? */
   12038 	(void)CSR_READ(sc, WMREG_RXCW);
   12039 	(void)CSR_READ(sc, WMREG_CTRL);
   12040 
	/* Set link status */
   12042 	if ((status & STATUS_LU) == 0) {
   12043 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12044 			device_xname(sc->sc_dev)));
   12045 		sc->sc_tbi_linkup = 0;
   12046 	} else if (sc->sc_tbi_linkup == 0) {
   12047 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12048 			device_xname(sc->sc_dev),
   12049 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12050 		sc->sc_tbi_linkup = 1;
   12051 		sc->sc_tbi_serdes_ticks = 0;
   12052 	}
   12053 
   12054 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12055 		goto setled;
   12056 
   12057 	if ((status & STATUS_LU) == 0) {
   12058 		sc->sc_tbi_linkup = 0;
   12059 		/* If the timer expired, retry autonegotiation */
   12060 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12061 		    && (++sc->sc_tbi_serdes_ticks
   12062 			>= sc->sc_tbi_serdes_anegticks)) {
   12063 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12064 			sc->sc_tbi_serdes_ticks = 0;
   12065 			/*
   12066 			 * Reset the link, and let autonegotiation do
   12067 			 * its thing
   12068 			 */
   12069 			sc->sc_ctrl |= CTRL_LRST;
   12070 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12071 			CSR_WRITE_FLUSH(sc);
   12072 			delay(1000);
   12073 			sc->sc_ctrl &= ~CTRL_LRST;
   12074 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12075 			CSR_WRITE_FLUSH(sc);
   12076 			delay(1000);
   12077 			CSR_WRITE(sc, WMREG_TXCW,
   12078 			    sc->sc_txcw & ~TXCW_ANE);
   12079 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12080 		}
   12081 	}
   12082 
   12083 setled:
   12084 	wm_tbi_serdes_set_linkled(sc);
   12085 }
   12086 
   12087 /* SERDES related */
   12088 static void
   12089 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12090 {
   12091 	uint32_t reg;
   12092 
   12093 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12094 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12095 		return;
   12096 
   12097 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12098 	reg |= PCS_CFG_PCS_EN;
   12099 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12100 
   12101 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12102 	reg &= ~CTRL_EXT_SWDPIN(3);
   12103 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12104 	CSR_WRITE_FLUSH(sc);
   12105 }
   12106 
   12107 static int
   12108 wm_serdes_mediachange(struct ifnet *ifp)
   12109 {
   12110 	struct wm_softc *sc = ifp->if_softc;
   12111 	bool pcs_autoneg = true; /* XXX */
   12112 	uint32_t ctrl_ext, pcs_lctl, reg;
   12113 
   12114 	/* XXX Currently, this function is not called on 8257[12] */
   12115 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12116 	    || (sc->sc_type >= WM_T_82575))
   12117 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12118 
   12119 	wm_serdes_power_up_link_82575(sc);
   12120 
   12121 	sc->sc_ctrl |= CTRL_SLU;
   12122 
   12123 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12124 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12125 
   12126 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12127 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12128 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12129 	case CTRL_EXT_LINK_MODE_SGMII:
   12130 		pcs_autoneg = true;
   12131 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12132 		break;
   12133 	case CTRL_EXT_LINK_MODE_1000KX:
   12134 		pcs_autoneg = false;
   12135 		/* FALLTHROUGH */
   12136 	default:
   12137 		if ((sc->sc_type == WM_T_82575)
   12138 		    || (sc->sc_type == WM_T_82576)) {
   12139 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12140 				pcs_autoneg = false;
   12141 		}
   12142 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12143 		    | CTRL_FRCFDX;
   12144 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12145 	}
   12146 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12147 
   12148 	if (pcs_autoneg) {
   12149 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12150 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12151 
   12152 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12153 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12154 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12155 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12156 	} else
   12157 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12158 
   12159 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12160 
   12162 	return 0;
   12163 }
   12164 
   12165 static void
   12166 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12167 {
   12168 	struct wm_softc *sc = ifp->if_softc;
   12169 	struct mii_data *mii = &sc->sc_mii;
   12170 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12171 	uint32_t pcs_adv, pcs_lpab, reg;
   12172 
   12173 	ifmr->ifm_status = IFM_AVALID;
   12174 	ifmr->ifm_active = IFM_ETHER;
   12175 
   12176 	/* Check PCS */
   12177 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12178 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12179 		ifmr->ifm_active |= IFM_NONE;
   12180 		sc->sc_tbi_linkup = 0;
   12181 		goto setled;
   12182 	}
   12183 
   12184 	sc->sc_tbi_linkup = 1;
   12185 	ifmr->ifm_status |= IFM_ACTIVE;
   12186 	if (sc->sc_type == WM_T_I354) {
   12187 		uint32_t status;
   12188 
   12189 		status = CSR_READ(sc, WMREG_STATUS);
   12190 		if (((status & STATUS_2P5_SKU) != 0)
   12191 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12192 			ifmr->ifm_active |= IFM_2500_KX;
   12193 		} else
   12194 			ifmr->ifm_active |= IFM_1000_KX;
   12195 	} else {
   12196 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12197 		case PCS_LSTS_SPEED_10:
   12198 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12199 			break;
   12200 		case PCS_LSTS_SPEED_100:
   12201 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12202 			break;
   12203 		case PCS_LSTS_SPEED_1000:
   12204 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12205 			break;
   12206 		default:
   12207 			device_printf(sc->sc_dev, "Unknown speed\n");
   12208 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12209 			break;
   12210 		}
   12211 	}
   12212 	if ((reg & PCS_LSTS_FDX) != 0)
   12213 		ifmr->ifm_active |= IFM_FDX;
   12214 	else
   12215 		ifmr->ifm_active |= IFM_HDX;
   12216 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12217 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12218 		/* Check flow */
   12219 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12220 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12221 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12222 			goto setled;
   12223 		}
   12224 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12225 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12226 		DPRINTF(WM_DEBUG_LINK,
   12227 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12228 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12229 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12230 			mii->mii_media_active |= IFM_FLOW
   12231 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12232 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12233 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12234 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12235 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12236 			mii->mii_media_active |= IFM_FLOW
   12237 			    | IFM_ETH_TXPAUSE;
   12238 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12239 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12240 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12241 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12242 			mii->mii_media_active |= IFM_FLOW
   12243 			    | IFM_ETH_RXPAUSE;
   12244 		}
   12245 	}
   12246 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12247 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12248 setled:
   12249 	wm_tbi_serdes_set_linkled(sc);
   12250 }
   12251 
   12252 /*
   12253  * wm_serdes_tick:
   12254  *
   12255  *	Check the link on serdes devices.
   12256  */
   12257 static void
   12258 wm_serdes_tick(struct wm_softc *sc)
   12259 {
   12260 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12261 	struct mii_data *mii = &sc->sc_mii;
   12262 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12263 	uint32_t reg;
   12264 
   12265 	KASSERT(WM_CORE_LOCKED(sc));
   12266 
   12267 	mii->mii_media_status = IFM_AVALID;
   12268 	mii->mii_media_active = IFM_ETHER;
   12269 
   12270 	/* Check PCS */
   12271 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12272 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12273 		mii->mii_media_status |= IFM_ACTIVE;
   12274 		sc->sc_tbi_linkup = 1;
   12275 		sc->sc_tbi_serdes_ticks = 0;
   12276 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12277 		if ((reg & PCS_LSTS_FDX) != 0)
   12278 			mii->mii_media_active |= IFM_FDX;
   12279 		else
   12280 			mii->mii_media_active |= IFM_HDX;
   12281 	} else {
   12282 		mii->mii_media_status |= IFM_NONE;
   12283 		sc->sc_tbi_linkup = 0;
   12284 		/* If the timer expired, retry autonegotiation */
   12285 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12286 		    && (++sc->sc_tbi_serdes_ticks
   12287 			>= sc->sc_tbi_serdes_anegticks)) {
   12288 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12289 			sc->sc_tbi_serdes_ticks = 0;
   12290 			/* XXX */
   12291 			wm_serdes_mediachange(ifp);
   12292 		}
   12293 	}
   12294 
   12295 	wm_tbi_serdes_set_linkled(sc);
   12296 }
   12297 
   12298 /* SFP related */
   12299 
   12300 static int
   12301 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12302 {
   12303 	uint32_t i2ccmd;
   12304 	int i;
   12305 
   12306 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12307 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12308 
   12309 	/* Poll the ready bit */
   12310 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12311 		delay(50);
   12312 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12313 		if (i2ccmd & I2CCMD_READY)
   12314 			break;
   12315 	}
   12316 	if ((i2ccmd & I2CCMD_READY) == 0)
   12317 		return -1;
   12318 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12319 		return -1;
   12320 
   12321 	*data = i2ccmd & 0x00ff;
   12322 
   12323 	return 0;
   12324 }
   12325 
   12326 static uint32_t
   12327 wm_sfp_get_media_type(struct wm_softc *sc)
   12328 {
   12329 	uint32_t ctrl_ext;
   12330 	uint8_t val = 0;
   12331 	int timeout = 3;
   12332 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12333 	int rv = -1;
   12334 
   12335 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12336 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12337 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12338 	CSR_WRITE_FLUSH(sc);
   12339 
   12340 	/* Read SFP module data */
   12341 	while (timeout) {
   12342 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12343 		if (rv == 0)
   12344 			break;
   12345 		delay(100*1000); /* XXX too big */
   12346 		timeout--;
   12347 	}
   12348 	if (rv != 0)
   12349 		goto out;
   12350 	switch (val) {
   12351 	case SFF_SFP_ID_SFF:
   12352 		aprint_normal_dev(sc->sc_dev,
   12353 		    "Module/Connector soldered to board\n");
   12354 		break;
   12355 	case SFF_SFP_ID_SFP:
   12356 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12357 		break;
   12358 	case SFF_SFP_ID_UNKNOWN:
   12359 		goto out;
   12360 	default:
   12361 		break;
   12362 	}
   12363 
   12364 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
	if (rv != 0)
		goto out;
   12368 
   12369 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12370 		mediatype = WM_MEDIATYPE_SERDES;
   12371 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12372 		sc->sc_flags |= WM_F_SGMII;
   12373 		mediatype = WM_MEDIATYPE_COPPER;
   12374 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12375 		sc->sc_flags |= WM_F_SGMII;
   12376 		mediatype = WM_MEDIATYPE_SERDES;
   12377 	}
   12378 
   12379 out:
   12380 	/* Restore I2C interface setting */
   12381 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12382 
   12383 	return mediatype;
   12384 }
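
/*
 * For reference, this parsing follows the SFF-8472 module EEPROM
 * layout: SFF_SFP_ID_OFF is presumably the identifier byte and
 * SFF_SFP_ETH_FLAGS_OFF the Ethernet compliance-code byte, so
 * 1000BASE-SX/LX modules are driven as SERDES while 1000BASE-T and
 * 100BASE-FX modules go through SGMII.
 */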
   12385 
   12386 /*
   12387  * NVM related.
   12388  * Microwire, SPI (w/wo EERD) and Flash.
   12389  */
   12390 
   12391 /* Both spi and uwire */
   12392 
   12393 /*
   12394  * wm_eeprom_sendbits:
   12395  *
   12396  *	Send a series of bits to the EEPROM.
   12397  */
   12398 static void
   12399 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12400 {
   12401 	uint32_t reg;
   12402 	int x;
   12403 
   12404 	reg = CSR_READ(sc, WMREG_EECD);
   12405 
   12406 	for (x = nbits; x > 0; x--) {
   12407 		if (bits & (1U << (x - 1)))
   12408 			reg |= EECD_DI;
   12409 		else
   12410 			reg &= ~EECD_DI;
   12411 		CSR_WRITE(sc, WMREG_EECD, reg);
   12412 		CSR_WRITE_FLUSH(sc);
   12413 		delay(2);
   12414 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12415 		CSR_WRITE_FLUSH(sc);
   12416 		delay(2);
   12417 		CSR_WRITE(sc, WMREG_EECD, reg);
   12418 		CSR_WRITE_FLUSH(sc);
   12419 		delay(2);
   12420 	}
   12421 }
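
/*
 * A sketch of the bit-bang timing used by wm_eeprom_sendbits() and
 * wm_eeprom_recvbits(): each bit, most significant first, is driven on
 * (or sampled from) the data line around a 2us-high/2us-low pulse of
 * the EECD SK clock; sendbits sets DI before raising SK, recvbits
 * samples DO while SK is high.
 */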
   12422 
   12423 /*
   12424  * wm_eeprom_recvbits:
   12425  *
   12426  *	Receive a series of bits from the EEPROM.
   12427  */
   12428 static void
   12429 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12430 {
   12431 	uint32_t reg, val;
   12432 	int x;
   12433 
   12434 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12435 
   12436 	val = 0;
   12437 	for (x = nbits; x > 0; x--) {
   12438 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12439 		CSR_WRITE_FLUSH(sc);
   12440 		delay(2);
   12441 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12442 			val |= (1U << (x - 1));
   12443 		CSR_WRITE(sc, WMREG_EECD, reg);
   12444 		CSR_WRITE_FLUSH(sc);
   12445 		delay(2);
   12446 	}
   12447 	*valp = val;
   12448 }
   12449 
   12450 /* Microwire */
   12451 
   12452 /*
   12453  * wm_nvm_read_uwire:
   12454  *
   12455  *	Read a word from the EEPROM using the MicroWire protocol.
   12456  */
   12457 static int
   12458 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12459 {
   12460 	uint32_t reg, val;
   12461 	int i;
   12462 
   12463 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12464 		device_xname(sc->sc_dev), __func__));
   12465 
   12466 	if (sc->nvm.acquire(sc) != 0)
   12467 		return -1;
   12468 
   12469 	for (i = 0; i < wordcnt; i++) {
   12470 		/* Clear SK and DI. */
   12471 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12472 		CSR_WRITE(sc, WMREG_EECD, reg);
   12473 
   12474 		/*
   12475 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12476 		 * and Xen.
   12477 		 *
   12478 		 * We use this workaround only for 82540 because qemu's
   12479 		 * e1000 act as 82540.
   12480 		 */
   12481 		if (sc->sc_type == WM_T_82540) {
   12482 			reg |= EECD_SK;
   12483 			CSR_WRITE(sc, WMREG_EECD, reg);
   12484 			reg &= ~EECD_SK;
   12485 			CSR_WRITE(sc, WMREG_EECD, reg);
   12486 			CSR_WRITE_FLUSH(sc);
   12487 			delay(2);
   12488 		}
   12489 		/* XXX: end of workaround */
   12490 
   12491 		/* Set CHIP SELECT. */
   12492 		reg |= EECD_CS;
   12493 		CSR_WRITE(sc, WMREG_EECD, reg);
   12494 		CSR_WRITE_FLUSH(sc);
   12495 		delay(2);
   12496 
   12497 		/* Shift in the READ command. */
   12498 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12499 
   12500 		/* Shift in address. */
   12501 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12502 
   12503 		/* Shift out the data. */
   12504 		wm_eeprom_recvbits(sc, &val, 16);
   12505 		data[i] = val & 0xffff;
   12506 
   12507 		/* Clear CHIP SELECT. */
   12508 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12509 		CSR_WRITE(sc, WMREG_EECD, reg);
   12510 		CSR_WRITE_FLUSH(sc);
   12511 		delay(2);
   12512 	}
   12513 
   12514 	sc->nvm.release(sc);
   12515 	return 0;
   12516 }
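
/*
 * The Microwire READ transaction above, summarized: raise chip select,
 * clock out the 3-bit READ opcode, clock out sc_nvm_addrbits address
 * bits, clock in 16 data bits, then drop chip select; this repeats
 * once per word, since the loop above does not use sequential reads.
 */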
   12517 
   12518 /* SPI */
   12519 
   12520 /*
   12521  * Set SPI and FLASH related information from the EECD register.
   12522  * For 82541 and 82547, the word size is taken from EEPROM.
   12523  */
   12524 static int
   12525 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12526 {
   12527 	int size;
   12528 	uint32_t reg;
   12529 	uint16_t data;
   12530 
   12531 	reg = CSR_READ(sc, WMREG_EECD);
   12532 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12533 
   12534 	/* Read the size of NVM from EECD by default */
   12535 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12536 	switch (sc->sc_type) {
   12537 	case WM_T_82541:
   12538 	case WM_T_82541_2:
   12539 	case WM_T_82547:
   12540 	case WM_T_82547_2:
   12541 		/* Set dummy value to access EEPROM */
   12542 		sc->sc_nvm_wordsize = 64;
   12543 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12544 			aprint_error_dev(sc->sc_dev,
   12545 			    "%s: failed to read EEPROM size\n", __func__);
   12546 		}
   12547 		reg = data;
   12548 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12549 		if (size == 0)
   12550 			size = 6; /* 64 word size */
   12551 		else
   12552 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12553 		break;
   12554 	case WM_T_80003:
   12555 	case WM_T_82571:
   12556 	case WM_T_82572:
   12557 	case WM_T_82573: /* SPI case */
   12558 	case WM_T_82574: /* SPI case */
   12559 	case WM_T_82583: /* SPI case */
   12560 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12561 		if (size > 14)
   12562 			size = 14;
   12563 		break;
   12564 	case WM_T_82575:
   12565 	case WM_T_82576:
   12566 	case WM_T_82580:
   12567 	case WM_T_I350:
   12568 	case WM_T_I354:
   12569 	case WM_T_I210:
   12570 	case WM_T_I211:
   12571 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12572 		if (size > 15)
   12573 			size = 15;
   12574 		break;
   12575 	default:
   12576 		aprint_error_dev(sc->sc_dev,
   12577 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12578 		return -1;
   12579 		break;
   12580 	}
   12581 
   12582 	sc->sc_nvm_wordsize = 1 << size;
   12583 
   12584 	return 0;
   12585 }
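
/*
 * Worked example for the size computation above (assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6, as in the e1000 code this derives
 * from): an EECD size field of 1 gives size = 1 + 6 = 7, hence
 * sc_nvm_wordsize = 1 << 7 = 128 words; the clamps keep parts at or
 * below 2^14 words (82571 class) or 2^15 words (82575 and later).
 */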
   12586 
   12587 /*
   12588  * wm_nvm_ready_spi:
   12589  *
   12590  *	Wait for a SPI EEPROM to be ready for commands.
   12591  */
   12592 static int
   12593 wm_nvm_ready_spi(struct wm_softc *sc)
   12594 {
   12595 	uint32_t val;
   12596 	int usec;
   12597 
   12598 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12599 		device_xname(sc->sc_dev), __func__));
   12600 
   12601 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12602 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12603 		wm_eeprom_recvbits(sc, &val, 8);
   12604 		if ((val & SPI_SR_RDY) == 0)
   12605 			break;
   12606 	}
   12607 	if (usec >= SPI_MAX_RETRIES) {
   12608 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12609 		return -1;
   12610 	}
   12611 	return 0;
   12612 }
   12613 
   12614 /*
   12615  * wm_nvm_read_spi:
   12616  *
 *	Read a word from the EEPROM using the SPI protocol.
   12618  */
   12619 static int
   12620 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12621 {
   12622 	uint32_t reg, val;
   12623 	int i;
   12624 	uint8_t opc;
   12625 	int rv = 0;
   12626 
   12627 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12628 		device_xname(sc->sc_dev), __func__));
   12629 
   12630 	if (sc->nvm.acquire(sc) != 0)
   12631 		return -1;
   12632 
   12633 	/* Clear SK and CS. */
   12634 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12635 	CSR_WRITE(sc, WMREG_EECD, reg);
   12636 	CSR_WRITE_FLUSH(sc);
   12637 	delay(2);
   12638 
   12639 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12640 		goto out;
   12641 
   12642 	/* Toggle CS to flush commands. */
   12643 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12644 	CSR_WRITE_FLUSH(sc);
   12645 	delay(2);
   12646 	CSR_WRITE(sc, WMREG_EECD, reg);
   12647 	CSR_WRITE_FLUSH(sc);
   12648 	delay(2);
   12649 
   12650 	opc = SPI_OPC_READ;
   12651 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12652 		opc |= SPI_OPC_A8;
   12653 
   12654 	wm_eeprom_sendbits(sc, opc, 8);
   12655 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12656 
   12657 	for (i = 0; i < wordcnt; i++) {
   12658 		wm_eeprom_recvbits(sc, &val, 16);
   12659 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12660 	}
   12661 
   12662 	/* Raise CS and clear SK. */
   12663 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12664 	CSR_WRITE(sc, WMREG_EECD, reg);
   12665 	CSR_WRITE_FLUSH(sc);
   12666 	delay(2);
   12667 
   12668 out:
   12669 	sc->nvm.release(sc);
   12670 	return rv;
   12671 }
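
/*
 * Note on the SPI_OPC_A8 quirk above: with 8-bit addressing the byte
 * address (word << 1) no longer fits in one address byte once
 * word >= 128 (byte address >= 256), so the ninth address bit is
 * carried in the opcode as SPI_OPC_A8, a convention used by small SPI
 * EEPROM parts.
 */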
   12672 
   12673 /* Using with EERD */
   12674 
   12675 static int
   12676 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12677 {
   12678 	uint32_t attempts = 100000;
   12679 	uint32_t i, reg = 0;
   12680 	int32_t done = -1;
   12681 
   12682 	for (i = 0; i < attempts; i++) {
   12683 		reg = CSR_READ(sc, rw);
   12684 
   12685 		if (reg & EERD_DONE) {
   12686 			done = 0;
   12687 			break;
   12688 		}
   12689 		delay(5);
   12690 	}
   12691 
   12692 	return done;
   12693 }
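
/*
 * With 100000 attempts and a 5us delay per attempt, the poll above
 * gives up after roughly half a second.
 */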
   12694 
   12695 static int
   12696 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12697 {
   12698 	int i, eerd = 0;
   12699 	int rv = 0;
   12700 
   12701 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12702 		device_xname(sc->sc_dev), __func__));
   12703 
   12704 	if (sc->nvm.acquire(sc) != 0)
   12705 		return -1;
   12706 
   12707 	for (i = 0; i < wordcnt; i++) {
   12708 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12709 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12710 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12711 		if (rv != 0) {
   12712 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12714 			break;
   12715 		}
   12716 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12717 	}
   12718 
   12719 	sc->nvm.release(sc);
   12720 	return rv;
   12721 }
   12722 
   12723 /* Flash */
   12724 
   12725 static int
   12726 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12727 {
   12728 	uint32_t eecd;
   12729 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12730 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12731 	uint32_t nvm_dword = 0;
   12732 	uint8_t sig_byte = 0;
   12733 	int rv;
   12734 
   12735 	switch (sc->sc_type) {
   12736 	case WM_T_PCH_SPT:
   12737 	case WM_T_PCH_CNP:
   12738 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12739 		act_offset = ICH_NVM_SIG_WORD * 2;
   12740 
   12741 		/* Set bank to 0 in case flash read fails. */
   12742 		*bank = 0;
   12743 
   12744 		/* Check bank 0 */
   12745 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12746 		if (rv != 0)
   12747 			return rv;
   12748 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12749 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12750 			*bank = 0;
   12751 			return 0;
   12752 		}
   12753 
   12754 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12758 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12759 			*bank = 1;
   12760 			return 0;
   12761 		}
   12762 		aprint_error_dev(sc->sc_dev,
   12763 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12764 		return -1;
   12765 	case WM_T_ICH8:
   12766 	case WM_T_ICH9:
   12767 		eecd = CSR_READ(sc, WMREG_EECD);
   12768 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12769 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12770 			return 0;
   12771 		}
   12772 		/* FALLTHROUGH */
   12773 	default:
   12774 		/* Default to 0 */
   12775 		*bank = 0;
   12776 
   12777 		/* Check bank 0 */
   12778 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12779 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12780 			*bank = 0;
   12781 			return 0;
   12782 		}
   12783 
   12784 		/* Check bank 1 */
   12785 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12786 		    &sig_byte);
   12787 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12788 			*bank = 1;
   12789 			return 0;
   12790 		}
   12791 	}
   12792 
   12793 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12794 		device_xname(sc->sc_dev)));
   12795 	return -1;
   12796 }
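
/*
 * How the detection above works, in brief: each flash bank carries a
 * signature in the high byte of the word at ICH_NVM_SIG_WORD, and a
 * bank is valid when (sig_byte & ICH_NVM_VALID_SIG_MASK) equals
 * ICH_NVM_SIG_VALUE; bank 0 is tried first, then bank 1 at
 * bank1_offset, and on ICH8/ICH9 the EECD SEC1VAL bits can short-cut
 * the probe.
 */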
   12797 
   12798 /******************************************************************************
   12799  * This function does initial flash setup so that a new read/write/erase cycle
   12800  * can be started.
   12801  *
   12802  * sc - The pointer to the hw structure
   12803  ****************************************************************************/
   12804 static int32_t
   12805 wm_ich8_cycle_init(struct wm_softc *sc)
   12806 {
   12807 	uint16_t hsfsts;
   12808 	int32_t error = 1;
   12809 	int32_t i     = 0;
   12810 
   12811 	if (sc->sc_type >= WM_T_PCH_SPT)
   12812 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12813 	else
   12814 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12815 
	/* Check the Flash Descriptor Valid bit in HW status */
   12817 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12818 		return error;
   12819 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   12822 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12823 
   12824 	if (sc->sc_type >= WM_T_PCH_SPT)
   12825 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12826 	else
   12827 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12828 
	/*
	 * Either the hardware should provide a "cycle in progress" bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set to 1 after a hardware reset so that it can be used
	 * to tell whether a cycle is in progress or has completed.  We
	 * should also have a software semaphore guarding FDONE or the
	 * in-progress bit, so that accesses by two threads are serialized
	 * and they don't start a cycle at the same time.
	 */
   12839 
   12840 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12841 		/*
   12842 		 * There is no cycle running at present, so we can start a
   12843 		 * cycle
   12844 		 */
   12845 
   12846 		/* Begin by setting Flash Cycle Done. */
   12847 		hsfsts |= HSFSTS_DONE;
   12848 		if (sc->sc_type >= WM_T_PCH_SPT)
   12849 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12850 			    hsfsts & 0xffffUL);
   12851 		else
   12852 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12853 		error = 0;
   12854 	} else {
   12855 		/*
		 * Otherwise, poll for a while so that the current cycle has
		 * a chance to end before giving up.
   12858 		 */
   12859 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12860 			if (sc->sc_type >= WM_T_PCH_SPT)
   12861 				hsfsts = ICH8_FLASH_READ32(sc,
   12862 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12863 			else
   12864 				hsfsts = ICH8_FLASH_READ16(sc,
   12865 				    ICH_FLASH_HSFSTS);
   12866 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12867 				error = 0;
   12868 				break;
   12869 			}
   12870 			delay(1);
   12871 		}
   12872 		if (error == 0) {
   12873 			/*
			 * The previous cycle finished before the timeout,
			 * so now set the Flash Cycle Done bit.
   12876 			 */
   12877 			hsfsts |= HSFSTS_DONE;
   12878 			if (sc->sc_type >= WM_T_PCH_SPT)
   12879 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12880 				    hsfsts & 0xffffUL);
   12881 			else
   12882 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12883 				    hsfsts);
   12884 		}
   12885 	}
   12886 	return error;
   12887 }
   12888 
   12889 /******************************************************************************
   12890  * This function starts a flash cycle and waits for its completion
   12891  *
   12892  * sc - The pointer to the hw structure
   12893  ****************************************************************************/
   12894 static int32_t
   12895 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12896 {
   12897 	uint16_t hsflctl;
   12898 	uint16_t hsfsts;
   12899 	int32_t error = 1;
   12900 	uint32_t i = 0;
   12901 
   12902 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12903 	if (sc->sc_type >= WM_T_PCH_SPT)
   12904 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12905 	else
   12906 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12907 	hsflctl |= HSFCTL_GO;
   12908 	if (sc->sc_type >= WM_T_PCH_SPT)
   12909 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12910 		    (uint32_t)hsflctl << 16);
   12911 	else
   12912 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12913 
   12914 	/* Wait until the FDONE bit is set to 1 */
   12915 	do {
   12916 		if (sc->sc_type >= WM_T_PCH_SPT)
   12917 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12918 			    & 0xffffUL;
   12919 		else
   12920 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12921 		if (hsfsts & HSFSTS_DONE)
   12922 			break;
   12923 		delay(1);
   12924 		i++;
   12925 	} while (i < timeout);
   12926 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12927 		error = 0;
   12928 
   12929 	return error;
   12930 }
   12931 
   12932 /******************************************************************************
   12933  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12934  *
   12935  * sc - The pointer to the hw structure
   12936  * index - The index of the byte or word to read.
   12937  * size - Size of data to read, 1=byte 2=word, 4=dword
   12938  * data - Pointer to the word to store the value read.
   12939  *****************************************************************************/
   12940 static int32_t
   12941 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12942     uint32_t size, uint32_t *data)
   12943 {
   12944 	uint16_t hsfsts;
   12945 	uint16_t hsflctl;
   12946 	uint32_t flash_linear_address;
   12947 	uint32_t flash_data = 0;
   12948 	int32_t error = 1;
   12949 	int32_t count = 0;
   12950 
   12951 	if (size < 1 || size > 4 || data == NULL ||
   12952 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12953 		return error;
   12954 
   12955 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12956 	    sc->sc_ich8_flash_base;
   12957 
   12958 	do {
   12959 		delay(1);
   12960 		/* Steps */
   12961 		error = wm_ich8_cycle_init(sc);
   12962 		if (error)
   12963 			break;
   12964 
   12965 		if (sc->sc_type >= WM_T_PCH_SPT)
   12966 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12967 			    >> 16;
   12968 		else
   12969 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12970 		/* The byte count field is size - 1 (i.e. 0 means 1 byte). */
   12971 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12972 		    & HSFCTL_BCOUNT_MASK;
   12973 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12974 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12975 			/*
   12976 			 * In SPT, this register is in LAN memory space, not
   12977 			 * flash. Therefore, only 32-bit access is supported.
   12978 			 */
   12979 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12980 			    (uint32_t)hsflctl << 16);
   12981 		} else
   12982 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12983 
   12984 		/*
   12985 		 * Write the last 24 bits of index into Flash Linear address
   12986 		 * field in Flash Address
   12987 		 */
   12988 		/* TODO: maybe check the index against the size of the flash */
   12989 
   12990 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12991 
   12992 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12993 
   12994 		/*
   12995 		 * If FCERR is set, clear it and retry the whole sequence
   12996 		 * a few more times; otherwise read the Flash Data0
   12997 		 * register, which returns the data least significant
   12998 		 * byte first.
   12999 		 */
   13000 		if (error == 0) {
   13001 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13002 			if (size == 1)
   13003 				*data = (uint8_t)(flash_data & 0x000000FF);
   13004 			else if (size == 2)
   13005 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13006 			else if (size == 4)
   13007 				*data = (uint32_t)flash_data;
   13008 			break;
   13009 		} else {
   13010 			/*
   13011 			 * If we've gotten here, then things are probably
   13012 			 * completely hosed, but if the error condition is
   13013 			 * detected, it won't hurt to give it another try...
   13014 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13015 			 */
   13016 			if (sc->sc_type >= WM_T_PCH_SPT)
   13017 				hsfsts = ICH8_FLASH_READ32(sc,
   13018 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13019 			else
   13020 				hsfsts = ICH8_FLASH_READ16(sc,
   13021 				    ICH_FLASH_HSFSTS);
   13022 
   13023 			if (hsfsts & HSFSTS_ERR) {
   13024 				/* Repeat for some time before giving up. */
   13025 				continue;
   13026 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13027 				break;
   13028 		}
   13029 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13030 
   13031 	return error;
   13032 }
   13033 
   13034 /******************************************************************************
   13035  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13036  *
   13037  * sc - pointer to the wm_softc structure
   13038  * index - The index of the byte to read.
   13039  * data - Pointer to a byte to store the value read.
   13040  *****************************************************************************/
   13041 static int32_t
   13042 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13043 {
   13044 	int32_t status;
   13045 	uint32_t word = 0;
   13046 
   13047 	status = wm_read_ich8_data(sc, index, 1, &word);
   13048 	if (status == 0)
   13049 		*data = (uint8_t)word;
   13050 	else
   13051 		*data = 0;
   13052 
   13053 	return status;
   13054 }
   13055 
   13056 /******************************************************************************
   13057  * Reads a word from the NVM using the ICH8 flash access registers.
   13058  *
   13059  * sc - pointer to the wm_softc structure
   13060  * index - The starting byte index of the word to read.
   13061  * data - Pointer to a word to store the value read.
   13062  *****************************************************************************/
   13063 static int32_t
   13064 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13065 {
   13066 	int32_t status;
   13067 	uint32_t word = 0;
   13068 
   13069 	status = wm_read_ich8_data(sc, index, 2, &word);
   13070 	if (status == 0)
   13071 		*data = (uint16_t)word;
   13072 	else
   13073 		*data = 0;
   13074 
   13075 	return status;
   13076 }
   13077 
   13078 /******************************************************************************
   13079  * Reads a dword from the NVM using the ICH8 flash access registers.
   13080  *
   13081  * sc - pointer to the wm_softc structure
   13082  * index - The starting byte index of the dword to read.
   13083  * data - Pointer to a dword to store the value read.
   13084  *****************************************************************************/
   13085 static int32_t
   13086 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13087 {
   13088 	int32_t status;
   13089 
   13090 	status = wm_read_ich8_data(sc, index, 4, data);
   13091 	return status;
   13092 }
   13093 
   13094 /******************************************************************************
   13095  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13096  * register.
   13097  *
   13098  * sc - Struct containing variables accessed by shared code
   13099  * offset - offset of word in the EEPROM to read
   13100  * data - word read from the EEPROM
   13101  * words - number of words to read
   13102  *****************************************************************************/
   13103 static int
   13104 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13105 {
   13106 	int32_t	 rv = 0;
   13107 	uint32_t flash_bank = 0;
   13108 	uint32_t act_offset = 0;
   13109 	uint32_t bank_offset = 0;
   13110 	uint16_t word = 0;
   13111 	uint16_t i = 0;
   13112 
   13113 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13114 		device_xname(sc->sc_dev), __func__));
   13115 
   13116 	if (sc->nvm.acquire(sc) != 0)
   13117 		return -1;
   13118 
   13119 	/*
   13120 	 * We need to know which is the valid flash bank.  In the event
   13121 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13122 	 * managing flash_bank. So it cannot be trusted and needs
   13123 	 * to be updated with each read.
   13124 	 */
   13125 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13126 	if (rv) {
   13127 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13128 			device_xname(sc->sc_dev)));
   13129 		flash_bank = 0;
   13130 	}
   13131 
   13132 	/*
   13133 	 * Adjust the offset if we're on bank 1; the bank size is in
   13134 	 * words, hence the * 2 to get a byte offset.
   13135 	 */
   13136 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13137 
   13138 	for (i = 0; i < words; i++) {
   13139 		/* The NVM part needs a byte offset, hence * 2 */
   13140 		act_offset = bank_offset + ((offset + i) * 2);
   13141 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13142 		if (rv) {
   13143 			aprint_error_dev(sc->sc_dev,
   13144 			    "%s: failed to read NVM\n", __func__);
   13145 			break;
   13146 		}
   13147 		data[i] = word;
   13148 	}
   13149 
   13150 	sc->nvm.release(sc);
   13151 	return rv;
   13152 }
   13153 
   13154 /******************************************************************************
   13155  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13156  * register.
   13157  *
   13158  * sc - Struct containing variables accessed by shared code
   13159  * offset - offset of word in the EEPROM to read
   13160  * data - word read from the EEPROM
   13161  * words - number of words to read
   13162  *****************************************************************************/
   13163 static int
   13164 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13165 {
   13166 	int32_t	 rv = 0;
   13167 	uint32_t flash_bank = 0;
   13168 	uint32_t act_offset = 0;
   13169 	uint32_t bank_offset = 0;
   13170 	uint32_t dword = 0;
   13171 	uint16_t i = 0;
   13172 
   13173 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13174 		device_xname(sc->sc_dev), __func__));
   13175 
   13176 	if (sc->nvm.acquire(sc) != 0)
   13177 		return -1;
   13178 
   13179 	/*
   13180 	 * We need to know which is the valid flash bank.  In the event
   13181 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13182 	 * managing flash_bank. So it cannot be trusted and needs
   13183 	 * to be updated with each read.
   13184 	 */
   13185 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13186 	if (rv) {
   13187 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13188 			device_xname(sc->sc_dev)));
   13189 		flash_bank = 0;
   13190 	}
   13191 
   13192 	/*
   13193 	 * Adjust the offset if we're on bank 1; the bank size is in
   13194 	 * words, hence the * 2 to get a byte offset.
   13195 	 */
   13196 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13197 
   13198 	for (i = 0; i < words; i++) {
   13199 		/* The NVM part needs a byte offset, hence * 2 */
   13200 		act_offset = bank_offset + ((offset + i) * 2);
   13201 		/* but we must read dword aligned, so mask ... */
   13202 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13203 		if (rv) {
   13204 			aprint_error_dev(sc->sc_dev,
   13205 			    "%s: failed to read NVM\n", __func__);
   13206 			break;
   13207 		}
   13208 		/* ... and pick out low or high word */
   13209 		if ((act_offset & 0x2) == 0)
   13210 			data[i] = (uint16_t)(dword & 0xFFFF);
   13211 		else
   13212 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13213 	}
   13214 
   13215 	sc->nvm.release(sc);
   13216 	return rv;
   13217 }
   13218 
   13219 /* iNVM */
   13220 
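         /*
          * wm_nvm_read_word_invm:
          *
          *	Look up a single word in the iNVM (i210/i211 integrated
          *	NVM) by scanning its autoload records, skipping over
          *	CSR-autoload and RSA-key records.  Returns nonzero if no
          *	WORD_AUTOLOAD record matches the given address.
          */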
   13221 static int
   13222 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13223 {
   13224 	int32_t	 rv = -1;	/* Assume failure; callers test for nonzero */
   13225 	uint32_t invm_dword;
   13226 	uint16_t i;
   13227 	uint8_t record_type, word_address;
   13228 
   13229 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13230 		device_xname(sc->sc_dev), __func__));
   13231 
   13232 	for (i = 0; i < INVM_SIZE; i++) {
   13233 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13234 		/* Get record type */
   13235 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13236 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13237 			break;
   13238 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13239 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13240 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13241 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13242 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13243 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13244 			if (word_address == address) {
   13245 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13246 				rv = 0;
   13247 				break;
   13248 			}
   13249 		}
   13250 	}
   13251 
   13252 	return rv;
   13253 }
   13254 
   13255 static int
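         /*
          * wm_nvm_read_invm:
          *
          *	Read words from the iNVM.  MAC address words fail hard if
          *	they are not programmed; other known words fall back to the
          *	i211 chip defaults, and unmapped words read as
          *	NVM_RESERVED_WORD.
          */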
   13256 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13257 {
   13258 	int rv = 0;
   13259 	int i;
   13260 
   13261 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13262 		device_xname(sc->sc_dev), __func__));
   13263 
   13264 	if (sc->nvm.acquire(sc) != 0)
   13265 		return -1;
   13266 
   13267 	for (i = 0; i < words; i++) {
   13268 		switch (offset + i) {
   13269 		case NVM_OFF_MACADDR:
   13270 		case NVM_OFF_MACADDR1:
   13271 		case NVM_OFF_MACADDR2:
   13272 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13273 			if (rv != 0) {
   13274 				data[i] = 0xffff;
   13275 				rv = -1;
   13276 			}
   13277 			break;
   13278 		case NVM_OFF_CFG2:
   13279 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13280 			if (rv != 0) {
   13281 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13282 				rv = 0;
   13283 			}
   13284 			break;
   13285 		case NVM_OFF_CFG4:
   13286 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13287 			if (rv != 0) {
   13288 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13289 				rv = 0;
   13290 			}
   13291 			break;
   13292 		case NVM_OFF_LED_1_CFG:
   13293 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13294 			if (rv != 0) {
   13295 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13296 				rv = 0;
   13297 			}
   13298 			break;
   13299 		case NVM_OFF_LED_0_2_CFG:
   13300 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13301 			if (rv != 0) {
   13302 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13303 				rv = 0;
   13304 			}
   13305 			break;
   13306 		case NVM_OFF_ID_LED_SETTINGS:
   13307 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13308 			if (rv != 0) {
   13309 				*data = ID_LED_RESERVED_FFFF;
   13310 				rv = 0;
   13311 			}
   13312 			break;
   13313 		default:
   13314 			DPRINTF(WM_DEBUG_NVM,
   13315 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13316 			*data = NVM_RESERVED_WORD;
   13317 			break;
   13318 		}
   13319 	}
   13320 
   13321 	sc->nvm.release(sc);
   13322 	return rv;
   13323 }
   13324 
   13325 /* Locking, NVM type detection, checksum validation, version and read */
   13326 
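         /*
          * wm_nvm_is_onboard_eeprom:
          *
          *	Return 0 if the part uses flash (82573/82574/82583 with
          *	EECD bits 15 and 16 both set), 1 if it uses an on-board
          *	EEPROM.
          */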
   13327 static int
   13328 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13329 {
   13330 	uint32_t eecd = 0;
   13331 
   13332 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13333 	    || sc->sc_type == WM_T_82583) {
   13334 		eecd = CSR_READ(sc, WMREG_EECD);
   13335 
   13336 		/* Isolate bits 15 & 16 */
   13337 		eecd = ((eecd >> 15) & 0x03);
   13338 
   13339 		/* If both bits are set, device is Flash type */
   13340 		if (eecd == 0x03)
   13341 			return 0;
   13342 	}
   13343 	return 1;
   13344 }
   13345 
   13346 static int
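         /*
          * wm_nvm_flash_presence_i210:
          *
          *	Return 1 if an external flash is detected
          *	(EEC_FLASH_DETECTED), 0 if the chip must run from its iNVM.
          */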
   13347 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13348 {
   13349 	uint32_t eec;
   13350 
   13351 	eec = CSR_READ(sc, WMREG_EEC);
   13352 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13353 		return 1;
   13354 
   13355 	return 0;
   13356 }
   13357 
   13358 /*
   13359  * wm_nvm_validate_checksum
   13360  *
   13361  * The sum of the first 64 16-bit words should equal NVM_CHECKSUM.
   13362  */
   13363 static int
   13364 wm_nvm_validate_checksum(struct wm_softc *sc)
   13365 {
   13366 	uint16_t checksum;
   13367 	uint16_t eeprom_data;
   13368 #ifdef WM_DEBUG
   13369 	uint16_t csum_wordaddr, valid_checksum;
   13370 #endif
   13371 	int i;
   13372 
   13373 	checksum = 0;
   13374 
   13375 	/* Don't check for I211 */
   13376 	if (sc->sc_type == WM_T_I211)
   13377 		return 0;
   13378 
   13379 #ifdef WM_DEBUG
   13380 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13381 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13382 		csum_wordaddr = NVM_OFF_COMPAT;
   13383 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13384 	} else {
   13385 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13386 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13387 	}
   13388 
   13389 	/* Dump EEPROM image for debug */
   13390 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13391 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13392 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13393 		/* XXX PCH_SPT? */
   13394 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13395 		if ((eeprom_data & valid_checksum) == 0)
   13396 			DPRINTF(WM_DEBUG_NVM,
   13397 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13398 				device_xname(sc->sc_dev), eeprom_data,
   13399 				    valid_checksum));
   13400 	}
   13401 
   13402 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13403 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13404 		for (i = 0; i < NVM_SIZE; i++) {
   13405 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13406 				printf("XXXX ");
   13407 			else
   13408 				printf("%04hx ", eeprom_data);
   13409 			if (i % 8 == 7)
   13410 				printf("\n");
   13411 		}
   13412 	}
   13413 
   13414 #endif /* WM_DEBUG */
   13415 
   13416 	for (i = 0; i < NVM_SIZE; i++) {
   13417 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13418 			return 1;
   13419 		checksum += eeprom_data;
   13420 	}
   13421 
   13422 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13423 #ifdef WM_DEBUG
   13424 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13425 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13426 #endif
   13427 	}
   13428 
   13429 	return 0;
   13430 }
   13431 
   13432 static void
   13433 wm_nvm_version_invm(struct wm_softc *sc)
   13434 {
   13435 	uint32_t dword;
   13436 
   13437 	/*
   13438 	 * Linux's code to decode the version is very strange, so we
   13439 	 * don't follow that algorithm and just use word 61 as the
   13440 	 * documentation describes. Perhaps it's not perfect though...
   13441 	 *
   13442 	 * Example:
   13443 	 *
   13444 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13445 	 */
   13446 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13447 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13448 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13449 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13450 }
   13451 
   13452 static void
   13453 wm_nvm_version(struct wm_softc *sc)
   13454 {
   13455 	uint16_t major, minor, build, patch;
   13456 	uint16_t uid0, uid1;
   13457 	uint16_t nvm_data;
   13458 	uint16_t off;
   13459 	bool check_version = false;
   13460 	bool check_optionrom = false;
   13461 	bool have_build = false;
   13462 	bool have_uid = true;
   13463 
   13464 	/*
   13465 	 * Version format:
   13466 	 *
   13467 	 * XYYZ
   13468 	 * X0YZ
   13469 	 * X0YY
   13470 	 *
   13471 	 * Example:
   13472 	 *
   13473 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13474 	 *	82571	0x50a6	5.10.6?
   13475 	 *	82572	0x506a	5.6.10?
   13476 	 *	82572EI	0x5069	5.6.9?
   13477 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13478 	 *		0x2013	2.1.3?
   13479 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13480 	 * ICH8+82567	0x0040	0.4.0?
   13481 	 * ICH9+82566	0x1040	1.4.0?
   13482 	 *ICH10+82567	0x0043	0.4.3?
   13483 	 *  PCH+82577	0x00c1	0.12.1?
   13484 	 * PCH2+82579	0x00d3	0.13.3?
   13485 	 *		0x00d4	0.13.4?
   13486 	 *  LPT+I218	0x0023	0.2.3?
   13487 	 *  SPT+I219	0x0084	0.8.4?
   13488 	 *  CNP+I219	0x0054	0.5.4?
   13489 	 */
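         	/*
         	 * For example, 0x50a2 decodes below as major 5, minor 0x0a
         	 * (printed as decimal 10) and build 2, i.e. "5.10.2".
         	 */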
   13490 
   13491 	/*
   13492 	 * XXX
   13493 	 * Qemu's e1000e emulation (82574L) has only a 64-word SPI ROM.
   13494 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13495 	 */
   13496 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13497 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13498 		have_uid = false;
   13499 
   13500 	switch (sc->sc_type) {
   13501 	case WM_T_82571:
   13502 	case WM_T_82572:
   13503 	case WM_T_82574:
   13504 	case WM_T_82583:
   13505 		check_version = true;
   13506 		check_optionrom = true;
   13507 		have_build = true;
   13508 		break;
   13509 	case WM_T_ICH8:
   13510 	case WM_T_ICH9:
   13511 	case WM_T_ICH10:
   13512 	case WM_T_PCH:
   13513 	case WM_T_PCH2:
   13514 	case WM_T_PCH_LPT:
   13515 	case WM_T_PCH_SPT:
   13516 	case WM_T_PCH_CNP:
   13517 		check_version = true;
   13518 		have_build = true;
   13519 		have_uid = false;
   13520 		break;
   13521 	case WM_T_82575:
   13522 	case WM_T_82576:
   13523 	case WM_T_82580:
   13524 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13525 			check_version = true;
   13526 		break;
   13527 	case WM_T_I211:
   13528 		wm_nvm_version_invm(sc);
   13529 		have_uid = false;
   13530 		goto printver;
   13531 	case WM_T_I210:
   13532 		if (!wm_nvm_flash_presence_i210(sc)) {
   13533 			wm_nvm_version_invm(sc);
   13534 			have_uid = false;
   13535 			goto printver;
   13536 		}
   13537 		/* FALLTHROUGH */
   13538 	case WM_T_I350:
   13539 	case WM_T_I354:
   13540 		check_version = true;
   13541 		check_optionrom = true;
   13542 		break;
   13543 	default:
   13544 		return;
   13545 	}
   13546 	if (check_version
   13547 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13548 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13549 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13550 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13551 			build = nvm_data & NVM_BUILD_MASK;
   13552 			have_build = true;
   13553 		} else
   13554 			minor = nvm_data & 0x00ff;
   13555 
   13556 		/* Decimal */
   13557 		minor = (minor / 16) * 10 + (minor % 16);
   13558 		sc->sc_nvm_ver_major = major;
   13559 		sc->sc_nvm_ver_minor = minor;
   13560 
   13561 printver:
   13562 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13563 		    sc->sc_nvm_ver_minor);
   13564 		if (have_build) {
   13565 			sc->sc_nvm_ver_build = build;
   13566 			aprint_verbose(".%d", build);
   13567 		}
   13568 	}
   13569 
   13570 	/* Assume the Option ROM area is above NVM_SIZE */
   13571 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13572 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13573 		/* Option ROM Version */
   13574 		if ((off != 0x0000) && (off != 0xffff)) {
   13575 			int rv;
   13576 
   13577 			off += NVM_COMBO_VER_OFF;
   13578 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13579 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13580 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13581 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13582 				/* 16bits */
   13583 				major = uid0 >> 8;
   13584 				build = (uid0 << 8) | (uid1 >> 8);
   13585 				patch = uid1 & 0x00ff;
   13586 				aprint_verbose(", option ROM Version %d.%d.%d",
   13587 				    major, build, patch);
   13588 			}
   13589 		}
   13590 	}
   13591 
   13592 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13593 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13594 }
   13595 
   13596 /*
   13597  * wm_nvm_read:
   13598  *
   13599  *	Read data from the serial EEPROM.
   13600  */
   13601 static int
   13602 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13603 {
   13604 	int rv;
   13605 
   13606 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13607 		device_xname(sc->sc_dev), __func__));
   13608 
   13609 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13610 		return -1;
   13611 
   13612 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13613 
   13614 	return rv;
   13615 }
   13616 
   13617 /*
   13618  * Hardware semaphores.
   13619  * Very complex...
   13620  */
   13621 
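         /*
          * wm_get_null / wm_put_null:
          *
          *	No-op acquire/release for devices that need no semaphore
          *	around NVM or PHY access.
          */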
   13622 static int
   13623 wm_get_null(struct wm_softc *sc)
   13624 {
   13625 
   13626 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13627 		device_xname(sc->sc_dev), __func__));
   13628 	return 0;
   13629 }
   13630 
   13631 static void
   13632 wm_put_null(struct wm_softc *sc)
   13633 {
   13634 
   13635 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13636 		device_xname(sc->sc_dev), __func__));
   13637 	return;
   13638 }
   13639 
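         /*
          * wm_get_eecd:
          *
          *	Request direct EEPROM access by setting EECD_EE_REQ and
          *	waiting up to 5ms for the grant bit.
          */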
   13640 static int
   13641 wm_get_eecd(struct wm_softc *sc)
   13642 {
   13643 	uint32_t reg;
   13644 	int x;
   13645 
   13646 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13647 		device_xname(sc->sc_dev), __func__));
   13648 
   13649 	reg = CSR_READ(sc, WMREG_EECD);
   13650 
   13651 	/* Request EEPROM access. */
   13652 	reg |= EECD_EE_REQ;
   13653 	CSR_WRITE(sc, WMREG_EECD, reg);
   13654 
   13655 	/* ... and wait for it to be granted. */
   13656 	for (x = 0; x < 1000; x++) {
   13657 		reg = CSR_READ(sc, WMREG_EECD);
   13658 		if (reg & EECD_EE_GNT)
   13659 			break;
   13660 		delay(5);
   13661 	}
   13662 	if ((reg & EECD_EE_GNT) == 0) {
   13663 		aprint_error_dev(sc->sc_dev,
   13664 		    "could not acquire EEPROM GNT\n");
   13665 		reg &= ~EECD_EE_REQ;
   13666 		CSR_WRITE(sc, WMREG_EECD, reg);
   13667 		return -1;
   13668 	}
   13669 
   13670 	return 0;
   13671 }
   13672 
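         /*
          * wm_nvm_eec_clock_raise / wm_nvm_eec_clock_lower:
          *
          *	Toggle the EEPROM serial clock (SK).  SPI parts need only
          *	1us; Microwire parts are clocked far more slowly (50us).
          */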
   13673 static void
   13674 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13675 {
   13676 
   13677 	*eecd |= EECD_SK;
   13678 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13679 	CSR_WRITE_FLUSH(sc);
   13680 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13681 		delay(1);
   13682 	else
   13683 		delay(50);
   13684 }
   13685 
   13686 static void
   13687 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13688 {
   13689 
   13690 	*eecd &= ~EECD_SK;
   13691 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13692 	CSR_WRITE_FLUSH(sc);
   13693 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13694 		delay(1);
   13695 	else
   13696 		delay(50);
   13697 }
   13698 
   13699 static void
   13700 wm_put_eecd(struct wm_softc *sc)
   13701 {
   13702 	uint32_t reg;
   13703 
   13704 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13705 		device_xname(sc->sc_dev), __func__));
   13706 
   13707 	/* Stop nvm */
   13708 	reg = CSR_READ(sc, WMREG_EECD);
   13709 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13710 		/* Pull CS high */
   13711 		reg |= EECD_CS;
   13712 		wm_nvm_eec_clock_lower(sc, &reg);
   13713 	} else {
   13714 		/* CS on Microwire is active-high */
   13715 		reg &= ~(EECD_CS | EECD_DI);
   13716 		CSR_WRITE(sc, WMREG_EECD, reg);
   13717 		wm_nvm_eec_clock_raise(sc, &reg);
   13718 		wm_nvm_eec_clock_lower(sc, &reg);
   13719 	}
   13720 
   13721 	reg = CSR_READ(sc, WMREG_EECD);
   13722 	reg &= ~EECD_EE_REQ;
   13723 	CSR_WRITE(sc, WMREG_EECD, reg);
   13724 
   13725 	return;
   13726 }
   13727 
   13728 /*
   13729  * Get hardware semaphore.
   13730  * Same as e1000_get_hw_semaphore_generic()
   13731  */
   13732 static int
   13733 wm_get_swsm_semaphore(struct wm_softc *sc)
   13734 {
   13735 	int32_t timeout;
   13736 	uint32_t swsm;
   13737 
   13738 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13739 		device_xname(sc->sc_dev), __func__));
   13740 	KASSERT(sc->sc_nvm_wordsize > 0);
   13741 
   13742 retry:
   13743 	/* Get the SW semaphore. */
   13744 	timeout = sc->sc_nvm_wordsize + 1;
   13745 	while (timeout) {
   13746 		swsm = CSR_READ(sc, WMREG_SWSM);
   13747 
   13748 		if ((swsm & SWSM_SMBI) == 0)
   13749 			break;
   13750 
   13751 		delay(50);
   13752 		timeout--;
   13753 	}
   13754 
   13755 	if (timeout == 0) {
   13756 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13757 			/*
   13758 			 * In rare circumstances, the SW semaphore may already
   13759 			 * be held unintentionally. Clear the semaphore once
   13760 			 * before giving up.
   13761 			 */
   13762 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13763 			wm_put_swsm_semaphore(sc);
   13764 			goto retry;
   13765 		}
   13766 		aprint_error_dev(sc->sc_dev,
   13767 		    "could not acquire SWSM SMBI\n");
   13768 		return 1;
   13769 	}
   13770 
   13771 	/* Get the FW semaphore. */
   13772 	timeout = sc->sc_nvm_wordsize + 1;
   13773 	while (timeout) {
   13774 		swsm = CSR_READ(sc, WMREG_SWSM);
   13775 		swsm |= SWSM_SWESMBI;
   13776 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13777 		/* If we managed to set the bit we got the semaphore. */
   13778 		swsm = CSR_READ(sc, WMREG_SWSM);
   13779 		if (swsm & SWSM_SWESMBI)
   13780 			break;
   13781 
   13782 		delay(50);
   13783 		timeout--;
   13784 	}
   13785 
   13786 	if (timeout == 0) {
   13787 		aprint_error_dev(sc->sc_dev,
   13788 		    "could not acquire SWSM SWESMBI\n");
   13789 		/* Release semaphores */
   13790 		wm_put_swsm_semaphore(sc);
   13791 		return 1;
   13792 	}
   13793 	return 0;
   13794 }
   13795 
   13796 /*
   13797  * Put hardware semaphore.
   13798  * Same as e1000_put_hw_semaphore_generic()
   13799  */
   13800 static void
   13801 wm_put_swsm_semaphore(struct wm_softc *sc)
   13802 {
   13803 	uint32_t swsm;
   13804 
   13805 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13806 		device_xname(sc->sc_dev), __func__));
   13807 
   13808 	swsm = CSR_READ(sc, WMREG_SWSM);
   13809 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13810 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13811 }
   13812 
   13813 /*
   13814  * Get SW/FW semaphore.
   13815  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13816  */
   13817 static int
   13818 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13819 {
   13820 	uint32_t swfw_sync;
   13821 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13822 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13823 	int timeout;
   13824 
   13825 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13826 		device_xname(sc->sc_dev), __func__));
   13827 
   13828 	if (sc->sc_type == WM_T_80003)
   13829 		timeout = 50;
   13830 	else
   13831 		timeout = 200;
   13832 
   13833 	while (timeout) {
   13834 		if (wm_get_swsm_semaphore(sc)) {
   13835 			aprint_error_dev(sc->sc_dev,
   13836 			    "%s: failed to get semaphore\n",
   13837 			    __func__);
   13838 			return 1;
   13839 		}
   13840 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13841 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13842 			swfw_sync |= swmask;
   13843 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13844 			wm_put_swsm_semaphore(sc);
   13845 			return 0;
   13846 		}
   13847 		wm_put_swsm_semaphore(sc);
   13848 		delay(5000);
   13849 		timeout--;
   13850 	}
   13851 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13852 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13853 	return 1;
   13854 }
   13855 
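         /*
          * wm_put_swfw_semaphore:
          *
          *	Clear our bits in SW_FW_SYNC, taking the SWSM hardware
          *	semaphore around the read-modify-write.
          */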
   13856 static void
   13857 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13858 {
   13859 	uint32_t swfw_sync;
   13860 
   13861 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13862 		device_xname(sc->sc_dev), __func__));
   13863 
   13864 	while (wm_get_swsm_semaphore(sc) != 0)
   13865 		continue;
   13866 
   13867 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13868 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13869 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13870 
   13871 	wm_put_swsm_semaphore(sc);
   13872 }
   13873 
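         /*
          * wm_get_nvm_80003 / wm_put_nvm_80003:
          *
          *	NVM access on 80003: take the EEPROM SW/FW semaphore and,
          *	if WM_F_LOCK_EECD is set, the EECD request/grant handshake
          *	on top of it.
          */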
   13874 static int
   13875 wm_get_nvm_80003(struct wm_softc *sc)
   13876 {
   13877 	int rv;
   13878 
   13879 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13880 		device_xname(sc->sc_dev), __func__));
   13881 
   13882 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13883 		aprint_error_dev(sc->sc_dev,
   13884 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13885 		return rv;
   13886 	}
   13887 
   13888 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13889 	    && (rv = wm_get_eecd(sc)) != 0) {
   13890 		aprint_error_dev(sc->sc_dev,
   13891 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13892 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13893 		return rv;
   13894 	}
   13895 
   13896 	return 0;
   13897 }
   13898 
   13899 static void
   13900 wm_put_nvm_80003(struct wm_softc *sc)
   13901 {
   13902 
   13903 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13904 		device_xname(sc->sc_dev), __func__));
   13905 
   13906 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13907 		wm_put_eecd(sc);
   13908 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13909 }
   13910 
   13911 static int
   13912 wm_get_nvm_82571(struct wm_softc *sc)
   13913 {
   13914 	int rv;
   13915 
   13916 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13917 		device_xname(sc->sc_dev), __func__));
   13918 
   13919 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13920 		return rv;
   13921 
   13922 	switch (sc->sc_type) {
   13923 	case WM_T_82573:
   13924 		break;
   13925 	default:
   13926 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13927 			rv = wm_get_eecd(sc);
   13928 		break;
   13929 	}
   13930 
   13931 	if (rv != 0) {
   13932 		aprint_error_dev(sc->sc_dev,
   13933 		    "%s: failed to get semaphore\n",
   13934 		    __func__);
   13935 		wm_put_swsm_semaphore(sc);
   13936 	}
   13937 
   13938 	return rv;
   13939 }
   13940 
   13941 static void
   13942 wm_put_nvm_82571(struct wm_softc *sc)
   13943 {
   13944 
   13945 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13946 		device_xname(sc->sc_dev), __func__));
   13947 
   13948 	switch (sc->sc_type) {
   13949 	case WM_T_82573:
   13950 		break;
   13951 	default:
   13952 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13953 			wm_put_eecd(sc);
   13954 		break;
   13955 	}
   13956 
   13957 	wm_put_swsm_semaphore(sc);
   13958 }
   13959 
   13960 static int
   13961 wm_get_phy_82575(struct wm_softc *sc)
   13962 {
   13963 
   13964 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13965 		device_xname(sc->sc_dev), __func__));
   13966 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13967 }
   13968 
   13969 static void
   13970 wm_put_phy_82575(struct wm_softc *sc)
   13971 {
   13972 
   13973 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13974 		device_xname(sc->sc_dev), __func__));
   13975 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13976 }
   13977 
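         /*
          * wm_get_swfwhw_semaphore / wm_put_swfwhw_semaphore:
          *
          *	Take or release software ownership of the MDIO interface by
          *	toggling EXTCNFCTR_MDIO_SW_OWNERSHIP.  The same PHY mutex
          *	protects both PHY and NVM access.
          */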
   13978 static int
   13979 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13980 {
   13981 	uint32_t ext_ctrl;
   13982 	int timeout;
   13983 
   13984 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13985 		device_xname(sc->sc_dev), __func__));
   13986 
   13987 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13988 	for (timeout = 0; timeout < 200; timeout++) {
   13989 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13990 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13991 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13992 
   13993 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13994 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13995 			return 0;
   13996 		delay(5000);
   13997 	}
   13998 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13999 	    device_xname(sc->sc_dev), ext_ctrl);
   14000 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14001 	return 1;
   14002 }
   14003 
   14004 static void
   14005 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14006 {
   14007 	uint32_t ext_ctrl;
   14008 
   14009 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14010 		device_xname(sc->sc_dev), __func__));
   14011 
   14012 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14013 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14014 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14015 
   14016 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14017 }
   14018 
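         /*
          * wm_get_swflag_ich8lan:
          *
          *	PCH variant of the above, which first waits for any current
          *	owner to drop EXTCNFCTR_MDIO_SW_OWNERSHIP before trying to
          *	take it.
          */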
   14019 static int
   14020 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14021 {
   14022 	uint32_t ext_ctrl;
   14023 	int timeout;
   14024 
   14025 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14026 		device_xname(sc->sc_dev), __func__));
   14027 	mutex_enter(sc->sc_ich_phymtx);
   14028 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14029 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14030 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14031 			break;
   14032 		delay(1000);
   14033 	}
   14034 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14035 		printf("%s: SW has already locked the resource\n",
   14036 		    device_xname(sc->sc_dev));
   14037 		goto out;
   14038 	}
   14039 
   14040 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14041 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14042 	for (timeout = 0; timeout < 1000; timeout++) {
   14043 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14044 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14045 			break;
   14046 		delay(1000);
   14047 	}
   14048 	if (timeout >= 1000) {
   14049 		printf("%s: failed to acquire semaphore\n",
   14050 		    device_xname(sc->sc_dev));
   14051 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14052 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14053 		goto out;
   14054 	}
   14055 	return 0;
   14056 
   14057 out:
   14058 	mutex_exit(sc->sc_ich_phymtx);
   14059 	return 1;
   14060 }
   14061 
   14062 static void
   14063 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14064 {
   14065 	uint32_t ext_ctrl;
   14066 
   14067 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14068 		device_xname(sc->sc_dev), __func__));
   14069 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14070 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14071 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14072 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14073 	} else {
   14074 		printf("%s: Semaphore unexpectedly released\n",
   14075 		    device_xname(sc->sc_dev));
   14076 	}
   14077 
   14078 	mutex_exit(sc->sc_ich_phymtx);
   14079 }
   14080 
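         /*
          * wm_get_nvm_ich8lan / wm_put_nvm_ich8lan:
          *
          *	On ICH/PCH the NVM is guarded by a driver mutex only; the
          *	flash cycle handshake itself is done in wm_ich8_cycle_init().
          */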
   14081 static int
   14082 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14083 {
   14084 
   14085 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14086 		device_xname(sc->sc_dev), __func__));
   14087 	mutex_enter(sc->sc_ich_nvmmtx);
   14088 
   14089 	return 0;
   14090 }
   14091 
   14092 static void
   14093 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14094 {
   14095 
   14096 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14097 		device_xname(sc->sc_dev), __func__));
   14098 	mutex_exit(sc->sc_ich_nvmmtx);
   14099 }
   14100 
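         /*
          * wm_get_hw_semaphore_82573 / wm_put_hw_semaphore_82573:
          *
          *	Take or release MDIO ownership via EXTCNFCTR, retrying every
          *	2ms for up to WM_MDIO_OWNERSHIP_TIMEOUT attempts.
          */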
   14101 static int
   14102 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14103 {
   14104 	int i = 0;
   14105 	uint32_t reg;
   14106 
   14107 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14108 		device_xname(sc->sc_dev), __func__));
   14109 
   14110 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14111 	do {
   14112 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14113 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14114 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14115 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14116 			break;
   14117 		delay(2*1000);
   14118 		i++;
   14119 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14120 
   14121 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14122 		wm_put_hw_semaphore_82573(sc);
   14123 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14124 		    device_xname(sc->sc_dev));
   14125 		return -1;
   14126 	}
   14127 
   14128 	return 0;
   14129 }
   14130 
   14131 static void
   14132 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14133 {
   14134 	uint32_t reg;
   14135 
   14136 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14137 		device_xname(sc->sc_dev), __func__));
   14138 
   14139 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14140 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14141 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14142 }
   14143 
   14144 /*
   14145  * Management mode and power management related subroutines.
   14146  * BMC, AMT, suspend/resume and EEE.
   14147  */
   14148 
   14149 #ifdef WM_WOL
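         /*
          * wm_check_mng_mode:
          *
          *	Dispatch to the per-family test of whether management
          *	firmware (BMC/AMT) is currently active.
          */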
   14150 static int
   14151 wm_check_mng_mode(struct wm_softc *sc)
   14152 {
   14153 	int rv;
   14154 
   14155 	switch (sc->sc_type) {
   14156 	case WM_T_ICH8:
   14157 	case WM_T_ICH9:
   14158 	case WM_T_ICH10:
   14159 	case WM_T_PCH:
   14160 	case WM_T_PCH2:
   14161 	case WM_T_PCH_LPT:
   14162 	case WM_T_PCH_SPT:
   14163 	case WM_T_PCH_CNP:
   14164 		rv = wm_check_mng_mode_ich8lan(sc);
   14165 		break;
   14166 	case WM_T_82574:
   14167 	case WM_T_82583:
   14168 		rv = wm_check_mng_mode_82574(sc);
   14169 		break;
   14170 	case WM_T_82571:
   14171 	case WM_T_82572:
   14172 	case WM_T_82573:
   14173 	case WM_T_80003:
   14174 		rv = wm_check_mng_mode_generic(sc);
   14175 		break;
   14176 	default:
   14177 		/* Nothing to do */
   14178 		rv = 0;
   14179 		break;
   14180 	}
   14181 
   14182 	return rv;
   14183 }
   14184 
   14185 static int
   14186 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14187 {
   14188 	uint32_t fwsm;
   14189 
   14190 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14191 
   14192 	if (((fwsm & FWSM_FW_VALID) != 0)
   14193 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14194 		return 1;
   14195 
   14196 	return 0;
   14197 }
   14198 
   14199 static int
   14200 wm_check_mng_mode_82574(struct wm_softc *sc)
   14201 {
   14202 	uint16_t data;
   14203 
   14204 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14205 
   14206 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14207 		return 1;
   14208 
   14209 	return 0;
   14210 }
   14211 
   14212 static int
   14213 wm_check_mng_mode_generic(struct wm_softc *sc)
   14214 {
   14215 	uint32_t fwsm;
   14216 
   14217 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14218 
   14219 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14220 		return 1;
   14221 
   14222 	return 0;
   14223 }
   14224 #endif /* WM_WOL */
   14225 
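         /*
          * wm_enable_mng_pass_thru:
          *
          *	Return 1 if management packets should be passed through to
          *	the host: ASF firmware must be present, TCO reception must
          *	be enabled, and the firmware/NVM manageability mode must
          *	allow it.
          */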
   14226 static int
   14227 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14228 {
   14229 	uint32_t manc, fwsm, factps;
   14230 
   14231 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14232 		return 0;
   14233 
   14234 	manc = CSR_READ(sc, WMREG_MANC);
   14235 
   14236 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14237 		device_xname(sc->sc_dev), manc));
   14238 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14239 		return 0;
   14240 
   14241 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14242 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14243 		factps = CSR_READ(sc, WMREG_FACTPS);
   14244 		if (((factps & FACTPS_MNGCG) == 0)
   14245 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14246 			return 1;
   14247 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14248 		uint16_t data;
   14249 
   14250 		factps = CSR_READ(sc, WMREG_FACTPS);
   14251 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14252 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14253 			device_xname(sc->sc_dev), factps, data));
   14254 		if (((factps & FACTPS_MNGCG) == 0)
   14255 		    && ((data & NVM_CFG2_MNGM_MASK)
   14256 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14257 			return 1;
   14258 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14259 	    && ((manc & MANC_ASF_EN) == 0))
   14260 		return 1;
   14261 
   14262 	return 0;
   14263 }
   14264 
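         /*
          * wm_phy_resetisblocked:
          *
          *	Check whether firmware currently blocks PHY resets: FWSM's
          *	RSPCIPHY bit on ICH/PCH (polled for up to ~300ms), or
          *	MANC_BLK_PHY_RST_ON_IDE on the 8257x family.
          */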
   14265 static bool
   14266 wm_phy_resetisblocked(struct wm_softc *sc)
   14267 {
   14268 	bool blocked = false;
   14269 	uint32_t reg;
   14270 	int i = 0;
   14271 
   14272 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14273 		device_xname(sc->sc_dev), __func__));
   14274 
   14275 	switch (sc->sc_type) {
   14276 	case WM_T_ICH8:
   14277 	case WM_T_ICH9:
   14278 	case WM_T_ICH10:
   14279 	case WM_T_PCH:
   14280 	case WM_T_PCH2:
   14281 	case WM_T_PCH_LPT:
   14282 	case WM_T_PCH_SPT:
   14283 	case WM_T_PCH_CNP:
   14284 		do {
   14285 			reg = CSR_READ(sc, WMREG_FWSM);
   14286 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14287 				blocked = true;
   14288 				delay(10*1000);
   14289 				continue;
   14290 			}
   14291 			blocked = false;
   14292 		} while (blocked && (i++ < 30));
   14293 		return blocked;
   14295 	case WM_T_82571:
   14296 	case WM_T_82572:
   14297 	case WM_T_82573:
   14298 	case WM_T_82574:
   14299 	case WM_T_82583:
   14300 	case WM_T_80003:
   14301 		reg = CSR_READ(sc, WMREG_MANC);
   14302 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14303 			return true;
   14304 		else
   14305 			return false;
   14307 	default:
   14308 		/* No problem */
   14309 		break;
   14310 	}
   14311 
   14312 	return false;
   14313 }
   14314 
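         /*
          * wm_get_hw_control / wm_release_hw_control:
          *
          *	Tell firmware that the driver has taken over (or released)
          *	the device, via SWSM_DRV_LOAD on 82573 or CTRL_EXT_DRV_LOAD
          *	on other 82571 and newer chips.
          */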
   14315 static void
   14316 wm_get_hw_control(struct wm_softc *sc)
   14317 {
   14318 	uint32_t reg;
   14319 
   14320 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14321 		device_xname(sc->sc_dev), __func__));
   14322 
   14323 	if (sc->sc_type == WM_T_82573) {
   14324 		reg = CSR_READ(sc, WMREG_SWSM);
   14325 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14326 	} else if (sc->sc_type >= WM_T_82571) {
   14327 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14328 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14329 	}
   14330 }
   14331 
   14332 static void
   14333 wm_release_hw_control(struct wm_softc *sc)
   14334 {
   14335 	uint32_t reg;
   14336 
   14337 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14338 		device_xname(sc->sc_dev), __func__));
   14339 
   14340 	if (sc->sc_type == WM_T_82573) {
   14341 		reg = CSR_READ(sc, WMREG_SWSM);
   14342 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14343 	} else if (sc->sc_type >= WM_T_82571) {
   14344 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14345 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14346 	}
   14347 }
   14348 
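         /*
          * wm_gate_hw_phy_config_ich8lan:
          *
          *	Gate or ungate automatic PHY configuration by hardware;
          *	only meaningful on PCH2 (82579) and newer.
          */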
   14349 static void
   14350 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14351 {
   14352 	uint32_t reg;
   14353 
   14354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14355 		device_xname(sc->sc_dev), __func__));
   14356 
   14357 	if (sc->sc_type < WM_T_PCH2)
   14358 		return;
   14359 
   14360 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14361 
   14362 	if (gate)
   14363 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14364 	else
   14365 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14366 
   14367 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14368 }
   14369 
   14370 static int
   14371 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14372 {
   14373 	uint32_t fwsm, reg;
   14374 	int rv = 0;
   14375 
   14376 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14377 		device_xname(sc->sc_dev), __func__));
   14378 
   14379 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14380 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14381 
   14382 	/* Disable ULP */
   14383 	wm_ulp_disable(sc);
   14384 
   14385 	/* Acquire PHY semaphore */
   14386 	rv = sc->phy.acquire(sc);
   14387 	if (rv != 0) {
   14388 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14389 		device_xname(sc->sc_dev), __func__));
   14390 		return -1;
   14391 	}
   14392 
   14393 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14394 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14395 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14396 	 */
   14397 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14398 	switch (sc->sc_type) {
   14399 	case WM_T_PCH_LPT:
   14400 	case WM_T_PCH_SPT:
   14401 	case WM_T_PCH_CNP:
   14402 		if (wm_phy_is_accessible_pchlan(sc))
   14403 			break;
   14404 
   14405 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14406 		 * forcing MAC to SMBus mode first.
   14407 		 */
   14408 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14409 		reg |= CTRL_EXT_FORCE_SMBUS;
   14410 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14411 #if 0
   14412 		/* XXX Isn't this required??? */
   14413 		CSR_WRITE_FLUSH(sc);
   14414 #endif
   14415 		/* Wait 50 milliseconds for MAC to finish any retries
   14416 		 * that it might be trying to perform from previous
   14417 		 * attempts to acknowledge any phy read requests.
   14418 		 */
   14419 		delay(50 * 1000);
   14420 		/* FALLTHROUGH */
   14421 	case WM_T_PCH2:
   14422 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14423 			break;
   14424 		/* FALLTHROUGH */
   14425 	case WM_T_PCH:
   14426 		if (sc->sc_type == WM_T_PCH)
   14427 			if ((fwsm & FWSM_FW_VALID) != 0)
   14428 				break;
   14429 
   14430 		if (wm_phy_resetisblocked(sc) == true) {
   14431 			printf("XXX reset is blocked(3)\n");
   14432 			break;
   14433 		}
   14434 
   14435 		/* Toggle LANPHYPC Value bit */
   14436 		wm_toggle_lanphypc_pch_lpt(sc);
   14437 
   14438 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14439 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14440 				break;
   14441 
   14442 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14443 			 * so ensure that the MAC is also out of SMBus mode
   14444 			 */
   14445 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14446 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14447 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14448 
   14449 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14450 				break;
   14451 			rv = -1;
   14452 		}
   14453 		break;
   14454 	default:
   14455 		break;
   14456 	}
   14457 
   14458 	/* Release semaphore */
   14459 	sc->phy.release(sc);
   14460 
   14461 	if (rv == 0) {
   14462 		/* Check to see if able to reset PHY.  Print error if not */
   14463 		if (wm_phy_resetisblocked(sc)) {
   14464 			printf("XXX reset is blocked(4)\n");
   14465 			goto out;
   14466 		}
   14467 
   14468 		/* Reset the PHY before any access to it.  Doing so, ensures
   14469 		 * that the PHY is in a known good state before we read/write
   14470 		 * PHY registers.  The generic reset is sufficient here,
   14471 		 * because we haven't determined the PHY type yet.
   14472 		 */
   14473 		if (wm_reset_phy(sc) != 0)
   14474 			goto out;
   14475 
   14476 		/* On a successful reset, possibly need to wait for the PHY
   14477 		 * to quiesce to an accessible state before returning control
   14478 		 * to the calling function.  If the PHY does not quiesce, then
   14479 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   14480 		 * the PHY is in.
   14481 		 */
   14482 		if (wm_phy_resetisblocked(sc))
   14483 			printf("XXX reset is blocked(4)\n");
   14484 	}
   14485 
   14486 out:
   14487 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14488 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14489 		delay(10*1000);
   14490 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14491 	}
   14492 
   14493 	return 0;
   14494 }
   14495 
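         /*
          * wm_init_manageability / wm_release_manageability:
          *
          *	While the driver is active, route management packets (ports
          *	623/624) to the host and keep ARP handling in the host;
          *	undo this on release.
          */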
   14496 static void
   14497 wm_init_manageability(struct wm_softc *sc)
   14498 {
   14499 
   14500 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14501 		device_xname(sc->sc_dev), __func__));
   14502 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14503 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14504 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14505 
   14506 		/* Disable hardware interception of ARP */
   14507 		manc &= ~MANC_ARP_EN;
   14508 
   14509 		/* Enable receiving management packets to the host */
   14510 		if (sc->sc_type >= WM_T_82571) {
   14511 			manc |= MANC_EN_MNG2HOST;
   14512 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14513 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14514 		}
   14515 
   14516 		CSR_WRITE(sc, WMREG_MANC, manc);
   14517 	}
   14518 }
   14519 
   14520 static void
   14521 wm_release_manageability(struct wm_softc *sc)
   14522 {
   14523 
   14524 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14525 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14526 
   14527 		manc |= MANC_ARP_EN;
   14528 		if (sc->sc_type >= WM_T_82571)
   14529 			manc &= ~MANC_EN_MNG2HOST;
   14530 
   14531 		CSR_WRITE(sc, WMREG_MANC, manc);
   14532 	}
   14533 }
   14534 
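         /*
          * wm_get_wakeup:
          *
          *	Record the per-chip management capabilities (AMT, ARC
          *	subsystem, ASF firmware, manageability pass-through) in
          *	sc_flags.
          */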
   14535 static void
   14536 wm_get_wakeup(struct wm_softc *sc)
   14537 {
   14538 
   14539 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14540 	switch (sc->sc_type) {
   14541 	case WM_T_82573:
   14542 	case WM_T_82583:
   14543 		sc->sc_flags |= WM_F_HAS_AMT;
   14544 		/* FALLTHROUGH */
   14545 	case WM_T_80003:
   14546 	case WM_T_82575:
   14547 	case WM_T_82576:
   14548 	case WM_T_82580:
   14549 	case WM_T_I350:
   14550 	case WM_T_I354:
   14551 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14552 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14553 		/* FALLTHROUGH */
   14554 	case WM_T_82541:
   14555 	case WM_T_82541_2:
   14556 	case WM_T_82547:
   14557 	case WM_T_82547_2:
   14558 	case WM_T_82571:
   14559 	case WM_T_82572:
   14560 	case WM_T_82574:
   14561 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14562 		break;
   14563 	case WM_T_ICH8:
   14564 	case WM_T_ICH9:
   14565 	case WM_T_ICH10:
   14566 	case WM_T_PCH:
   14567 	case WM_T_PCH2:
   14568 	case WM_T_PCH_LPT:
   14569 	case WM_T_PCH_SPT:
   14570 	case WM_T_PCH_CNP:
   14571 		sc->sc_flags |= WM_F_HAS_AMT;
   14572 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14573 		break;
   14574 	default:
   14575 		break;
   14576 	}
   14577 
   14578 	/* 1: HAS_MANAGE */
   14579 	if (wm_enable_mng_pass_thru(sc) != 0)
   14580 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14581 
   14582 	/*
   14583 	 * Note that the WOL flags are set after the resetting of the
   14584 	 * EEPROM stuff.
   14585 	 */
   14586 }
   14587 
   14588 /*
   14589  * Unconfigure Ultra Low Power mode.
   14590  * Only for I217 and newer (see below).
   14591  */
   14592 static int
   14593 wm_ulp_disable(struct wm_softc *sc)
   14594 {
   14595 	uint32_t reg;
   14596 	uint16_t phyreg;
   14597 	int i = 0, rv = 0;
   14598 
   14599 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14600 		device_xname(sc->sc_dev), __func__));
   14601 	/* Exclude old devices */
   14602 	if ((sc->sc_type < WM_T_PCH_LPT)
   14603 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14604 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14605 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14606 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14607 		return 0;
   14608 
   14609 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14610 		/* Request ME un-configure ULP mode in the PHY */
   14611 		reg = CSR_READ(sc, WMREG_H2ME);
   14612 		reg &= ~H2ME_ULP;
   14613 		reg |= H2ME_ENFORCE_SETTINGS;
   14614 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14615 
   14616 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14617 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14618 			if (i++ == 30) {
   14619 				printf("%s timed out\n", __func__);
   14620 				return -1;
   14621 			}
   14622 			delay(10 * 1000);
   14623 		}
   14624 		reg = CSR_READ(sc, WMREG_H2ME);
   14625 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14626 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14627 
   14628 		return 0;
   14629 	}
   14630 
   14631 	/* Acquire semaphore */
   14632 	rv = sc->phy.acquire(sc);
   14633 	if (rv != 0) {
   14634 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14635 		device_xname(sc->sc_dev), __func__));
   14636 		return -1;
   14637 	}
   14638 
   14639 	/* Toggle LANPHYPC */
   14640 	wm_toggle_lanphypc_pch_lpt(sc);
   14641 
   14642 	/* Unforce SMBus mode in PHY */
   14643 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14644 	if (rv != 0) {
   14645 		uint32_t reg2;
   14646 
   14647 		printf("%s: Force SMBus first.\n", __func__);
   14648 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14649 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14650 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14651 		delay(50 * 1000);
   14652 
   14653 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14654 		    &phyreg);
   14655 		if (rv != 0)
   14656 			goto release;
   14657 	}
   14658 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14659 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14660 
   14661 	/* Unforce SMBus mode in MAC */
   14662 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14663 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14664 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14665 
   14666 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14667 	if (rv != 0)
   14668 		goto release;
   14669 	phyreg |= HV_PM_CTRL_K1_ENA;
   14670 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14671 
   14672 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14673 		&phyreg);
   14674 	if (rv != 0)
   14675 		goto release;
   14676 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14677 	    | I218_ULP_CONFIG1_STICKY_ULP
   14678 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14679 	    | I218_ULP_CONFIG1_WOL_HOST
   14680 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14681 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14682 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14683 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14684 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14685 	phyreg |= I218_ULP_CONFIG1_START;
   14686 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14687 
   14688 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14689 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14690 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14691 
   14692 release:
   14693 	/* Release semaphore */
   14694 	sc->phy.release(sc);
   14695 	wm_gmii_reset(sc);
   14696 	delay(50 * 1000);
   14697 
   14698 	return rv;
   14699 }
   14700 
   14701 /* WOL in the newer chipset interfaces (pchlan) */
   14702 static int
   14703 wm_enable_phy_wakeup(struct wm_softc *sc)
   14704 {
   14705 	device_t dev = sc->sc_dev;
   14706 	uint32_t mreg, moff;
   14707 	uint16_t wuce, wuc, wufc, preg;
   14708 	int i, rv;
   14709 
   14710 	KASSERT(sc->sc_type >= WM_T_PCH);
   14711 
   14712 	/* Copy MAC RARs to PHY RARs */
   14713 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14714 
   14715 	/* Activate PHY wakeup */
   14716 	rv = sc->phy.acquire(sc);
   14717 	if (rv != 0) {
   14718 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14719 		    __func__);
   14720 		return rv;
   14721 	}
   14722 
   14723 	/*
   14724 	 * Enable access to PHY wakeup registers.
   14725 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14726 	 */
   14727 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14728 	if (rv != 0) {
   14729 		device_printf(dev,
   14730 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14731 		goto release;
   14732 	}
   14733 
   14734 	/* Copy MAC MTA to PHY MTA */
   14735 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14736 		uint16_t lo, hi;
   14737 
   14738 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14739 		lo = (uint16_t)(mreg & 0xffff);
   14740 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14741 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14742 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14743 	}
   14744 
   14745 	/* Configure PHY Rx Control register */
   14746 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14747 	mreg = CSR_READ(sc, WMREG_RCTL);
   14748 	if (mreg & RCTL_UPE)
   14749 		preg |= BM_RCTL_UPE;
   14750 	if (mreg & RCTL_MPE)
   14751 		preg |= BM_RCTL_MPE;
   14752 	preg &= ~(BM_RCTL_MO_MASK);
   14753 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14754 	if (moff != 0)
   14755 		preg |= moff << BM_RCTL_MO_SHIFT;
   14756 	if (mreg & RCTL_BAM)
   14757 		preg |= BM_RCTL_BAM;
   14758 	if (mreg & RCTL_PMCF)
   14759 		preg |= BM_RCTL_PMCF;
   14760 	mreg = CSR_READ(sc, WMREG_CTRL);
   14761 	if (mreg & CTRL_RFCE)
   14762 		preg |= BM_RCTL_RFCE;
   14763 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14764 
   14765 	wuc = WUC_APME | WUC_PME_EN;
   14766 	wufc = WUFC_MAG;
   14767 	/* Enable PHY wakeup in MAC register */
   14768 	CSR_WRITE(sc, WMREG_WUC,
   14769 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14770 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14771 
   14772 	/* Configure and enable PHY wakeup in PHY registers */
   14773 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14774 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14775 
   14776 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14777 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14778 
   14779 release:
   14780 	sc->phy.release(sc);
   14781 
    14782 	return rv;
   14783 }
   14784 
   14785 /* Power down workaround on D3 */
   14786 static void
   14787 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14788 {
   14789 	uint32_t reg;
   14790 	uint16_t phyreg;
   14791 	int i;
   14792 
   14793 	for (i = 0; i < 2; i++) {
   14794 		/* Disable link */
   14795 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14796 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14797 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14798 
   14799 		/*
   14800 		 * Call gig speed drop workaround on Gig disable before
   14801 		 * accessing any PHY registers
   14802 		 */
   14803 		if (sc->sc_type == WM_T_ICH8)
   14804 			wm_gig_downshift_workaround_ich8lan(sc);
   14805 
   14806 		/* Write VR power-down enable */
   14807 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14808 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14809 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14810 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14811 
   14812 		/* Read it back and test */
   14813 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14814 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14815 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14816 			break;
   14817 
   14818 		/* Issue PHY reset and repeat at most one more time */
   14819 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14820 	}
   14821 }
   14822 
   14823 /*
   14824  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14825  *  @sc: pointer to the HW structure
   14826  *
   14827  *  During S0 to Sx transition, it is possible the link remains at gig
   14828  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14829  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14830  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14831  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14832  *  needs to be written.
    14833  *  Parts that support (and are linked to a partner which supports) EEE in
   14834  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14835  *  than 10Mbps w/o EEE.
   14836  */
   14837 static void
   14838 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14839 {
   14840 	device_t dev = sc->sc_dev;
   14841 	struct ethercom *ec = &sc->sc_ethercom;
   14842 	uint32_t phy_ctrl;
   14843 	int rv;
   14844 
   14845 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14846 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14847 
   14848 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14849 
   14850 	if (sc->sc_phytype == WMPHY_I217) {
   14851 		uint16_t devid = sc->sc_pcidevid;
   14852 
   14853 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14854 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14855 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14856 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14857 		    (sc->sc_type >= WM_T_PCH_SPT))
   14858 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14859 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14860 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14861 
   14862 		if (sc->phy.acquire(sc) != 0)
   14863 			goto out;
   14864 
   14865 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14866 			uint16_t eee_advert;
   14867 
   14868 			rv = wm_read_emi_reg_locked(dev,
   14869 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14870 			if (rv)
   14871 				goto release;
   14872 
   14873 			/*
   14874 			 * Disable LPLU if both link partners support 100BaseT
   14875 			 * EEE and 100Full is advertised on both ends of the
   14876 			 * link, and enable Auto Enable LPI since there will
   14877 			 * be no driver to enable LPI while in Sx.
   14878 			 */
   14879 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14880 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14881 				uint16_t anar, phy_reg;
   14882 
   14883 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14884 				    &anar);
   14885 				if (anar & ANAR_TX_FD) {
   14886 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14887 					    PHY_CTRL_NOND0A_LPLU);
   14888 
   14889 					/* Set Auto Enable LPI after link up */
   14890 					sc->phy.readreg_locked(dev, 2,
   14891 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14892 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14893 					sc->phy.writereg_locked(dev, 2,
   14894 					    I217_LPI_GPIO_CTRL, phy_reg);
   14895 				}
   14896 			}
   14897 		}
   14898 
   14899 		/*
   14900 		 * For i217 Intel Rapid Start Technology support,
   14901 		 * when the system is going into Sx and no manageability engine
   14902 		 * is present, the driver must configure proxy to reset only on
   14903 		 * power good.	LPI (Low Power Idle) state must also reset only
   14904 		 * on power good, as well as the MTA (Multicast table array).
   14905 		 * The SMBus release must also be disabled on LCD reset.
   14906 		 */
   14907 
   14908 		/*
   14909 		 * Enable MTA to reset for Intel Rapid Start Technology
   14910 		 * Support
   14911 		 */
   14912 
   14913 release:
   14914 		sc->phy.release(sc);
   14915 	}
   14916 out:
   14917 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14918 
   14919 	if (sc->sc_type == WM_T_ICH8)
   14920 		wm_gig_downshift_workaround_ich8lan(sc);
   14921 
   14922 	if (sc->sc_type >= WM_T_PCH) {
   14923 		wm_oem_bits_config_ich8lan(sc, false);
   14924 
   14925 		/* Reset PHY to activate OEM bits on 82577/8 */
   14926 		if (sc->sc_type == WM_T_PCH)
   14927 			wm_reset_phy(sc);
   14928 
   14929 		if (sc->phy.acquire(sc) != 0)
   14930 			return;
   14931 		wm_write_smbus_addr(sc);
   14932 		sc->phy.release(sc);
   14933 	}
   14934 }
   14935 
   14936 /*
   14937  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14938  *  @sc: pointer to the HW structure
   14939  *
   14940  *  During Sx to S0 transitions on non-managed devices or managed devices
   14941  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14942  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14943  *  the PHY.
   14944  *  On i217, setup Intel Rapid Start Technology.
   14945  */
   14946 static int
   14947 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14948 {
   14949 	device_t dev = sc->sc_dev;
   14950 	int rv;
   14951 
   14952 	if (sc->sc_type < WM_T_PCH2)
   14953 		return 0;
   14954 
   14955 	rv = wm_init_phy_workarounds_pchlan(sc);
   14956 	if (rv != 0)
   14957 		return -1;
   14958 
   14959 	/* For i217 Intel Rapid Start Technology support when the system
   14960 	 * is transitioning from Sx and no manageability engine is present
   14961 	 * configure SMBus to restore on reset, disable proxy, and enable
   14962 	 * the reset on MTA (Multicast table array).
   14963 	 */
   14964 	if (sc->sc_phytype == WMPHY_I217) {
   14965 		uint16_t phy_reg;
   14966 
   14967 		if (sc->phy.acquire(sc) != 0)
   14968 			return -1;
   14969 
   14970 		/* Clear Auto Enable LPI after link up */
   14971 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14972 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14973 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14974 
   14975 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14976 			/* Restore clear on SMB if no manageability engine
   14977 			 * is present
   14978 			 */
   14979 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14980 			    &phy_reg);
   14981 			if (rv != 0)
   14982 				goto release;
   14983 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14984 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14985 
   14986 			/* Disable Proxy */
   14987 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14988 		}
   14989 		/* Enable reset on MTA */
    14990 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14991 		if (rv != 0)
   14992 			goto release;
   14993 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14994 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14995 
   14996 release:
   14997 		sc->phy.release(sc);
   14998 		return rv;
   14999 	}
   15000 
   15001 	return 0;
   15002 }
   15003 
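          /*
           * Arm the chip for wake-on-LAN: apply the suspend workarounds,
           * enable PHY- or MAC-based wakeup depending on the chip
           * generation, then request PME via the PCI power management
           * registers.
           */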
   15004 static void
   15005 wm_enable_wakeup(struct wm_softc *sc)
   15006 {
   15007 	uint32_t reg, pmreg;
   15008 	pcireg_t pmode;
   15009 	int rv = 0;
   15010 
   15011 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15012 		device_xname(sc->sc_dev), __func__));
   15013 
   15014 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15015 	    &pmreg, NULL) == 0)
   15016 		return;
   15017 
   15018 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15019 		goto pme;
   15020 
   15021 	/* Advertise the wakeup capability */
   15022 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15023 	    | CTRL_SWDPIN(3));
   15024 
   15025 	/* Keep the laser running on fiber adapters */
   15026 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15027 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15028 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15029 		reg |= CTRL_EXT_SWDPIN(3);
   15030 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15031 	}
   15032 
   15033 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15034 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15035 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15036 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15037 		wm_suspend_workarounds_ich8lan(sc);
   15038 
   15039 #if 0	/* For the multicast packet */
   15040 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15041 	reg |= WUFC_MC;
   15042 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15043 #endif
   15044 
   15045 	if (sc->sc_type >= WM_T_PCH) {
   15046 		rv = wm_enable_phy_wakeup(sc);
   15047 		if (rv != 0)
   15048 			goto pme;
   15049 	} else {
   15050 		/* Enable wakeup by the MAC */
   15051 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15052 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15053 	}
   15054 
   15055 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15056 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15057 		|| (sc->sc_type == WM_T_PCH2))
   15058 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15059 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15060 
   15061 pme:
   15062 	/* Request PME */
   15063 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15064 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15065 		/* For WOL */
   15066 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15067 	} else {
   15068 		/* Disable WOL */
   15069 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15070 	}
   15071 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15072 }
   15073 
   15074 /* Disable ASPM L0s and/or L1 for workaround */
   15075 static void
   15076 wm_disable_aspm(struct wm_softc *sc)
   15077 {
   15078 	pcireg_t reg, mask = 0;
    15079 	const char *str = "";
   15080 
    15081 	/*
    15082 	 * Only for PCIe devices which have the PCIe capability in the
    15083 	 * PCI config space.
    15084 	 */
   15085 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15086 		return;
   15087 
   15088 	switch (sc->sc_type) {
   15089 	case WM_T_82571:
   15090 	case WM_T_82572:
   15091 		/*
   15092 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15093 		 * State Power management L1 State (ASPM L1).
   15094 		 */
   15095 		mask = PCIE_LCSR_ASPM_L1;
   15096 		str = "L1 is";
   15097 		break;
   15098 	case WM_T_82573:
   15099 	case WM_T_82574:
   15100 	case WM_T_82583:
   15101 		/*
   15102 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15103 		 *
    15104 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15105 		 * some chipsets.  The documentation for the 82574 and 82583
    15106 		 * says that disabling L0s with those specific chipsets is
    15107 		 * sufficient, but we follow what the Intel em driver does.
   15108 		 *
   15109 		 * References:
   15110 		 * Errata 8 of the Specification Update of i82573.
   15111 		 * Errata 20 of the Specification Update of i82574.
   15112 		 * Errata 9 of the Specification Update of i82583.
   15113 		 */
   15114 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15115 		str = "L0s and L1 are";
   15116 		break;
   15117 	default:
   15118 		return;
   15119 	}
   15120 
   15121 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15122 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15123 	reg &= ~mask;
   15124 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15125 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15126 
   15127 	/* Print only in wm_attach() */
   15128 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15129 		aprint_verbose_dev(sc->sc_dev,
   15130 		    "ASPM %s disabled to workaround the errata.\n", str);
   15131 }
   15132 
   15133 /* LPLU */
   15134 
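          /*
           * LPLU (Low Power Link Up) makes the PHY negotiate the link at the
           * lowest speed first to save power.  The function below turns LPLU
           * off for the D0 (fully-on) power state.
           */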
   15135 static void
   15136 wm_lplu_d0_disable(struct wm_softc *sc)
   15137 {
   15138 	struct mii_data *mii = &sc->sc_mii;
   15139 	uint32_t reg;
   15140 	uint16_t phyval;
   15141 
   15142 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15143 		device_xname(sc->sc_dev), __func__));
   15144 
   15145 	if (sc->sc_phytype == WMPHY_IFE)
   15146 		return;
   15147 
   15148 	switch (sc->sc_type) {
   15149 	case WM_T_82571:
   15150 	case WM_T_82572:
   15151 	case WM_T_82573:
   15152 	case WM_T_82575:
   15153 	case WM_T_82576:
   15154 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15155 		phyval &= ~PMR_D0_LPLU;
   15156 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15157 		break;
   15158 	case WM_T_82580:
   15159 	case WM_T_I350:
   15160 	case WM_T_I210:
   15161 	case WM_T_I211:
   15162 		reg = CSR_READ(sc, WMREG_PHPM);
   15163 		reg &= ~PHPM_D0A_LPLU;
   15164 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15165 		break;
   15166 	case WM_T_82574:
   15167 	case WM_T_82583:
   15168 	case WM_T_ICH8:
   15169 	case WM_T_ICH9:
   15170 	case WM_T_ICH10:
   15171 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15172 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15173 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15174 		CSR_WRITE_FLUSH(sc);
   15175 		break;
   15176 	case WM_T_PCH:
   15177 	case WM_T_PCH2:
   15178 	case WM_T_PCH_LPT:
   15179 	case WM_T_PCH_SPT:
   15180 	case WM_T_PCH_CNP:
   15181 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15182 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15183 		if (wm_phy_resetisblocked(sc) == false)
   15184 			phyval |= HV_OEM_BITS_ANEGNOW;
   15185 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15186 		break;
   15187 	default:
   15188 		break;
   15189 	}
   15190 }
   15191 
   15192 /* EEE */
   15193 
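          /*
           * EEE (Energy Efficient Ethernet, IEEE 802.3az) lets both link
           * partners enter a Low Power Idle (LPI) state between packets.
           */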
   15194 static int
   15195 wm_set_eee_i350(struct wm_softc *sc)
   15196 {
   15197 	struct ethercom *ec = &sc->sc_ethercom;
   15198 	uint32_t ipcnfg, eeer;
   15199 	uint32_t ipcnfg_mask
   15200 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15201 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15202 
   15203 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15204 
   15205 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15206 	eeer = CSR_READ(sc, WMREG_EEER);
   15207 
   15208 	/* Enable or disable per user setting */
   15209 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15210 		ipcnfg |= ipcnfg_mask;
   15211 		eeer |= eeer_mask;
   15212 	} else {
   15213 		ipcnfg &= ~ipcnfg_mask;
   15214 		eeer &= ~eeer_mask;
   15215 	}
   15216 
   15217 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15218 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15219 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15220 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15221 
   15222 	return 0;
   15223 }
   15224 
   15225 static int
   15226 wm_set_eee_pchlan(struct wm_softc *sc)
   15227 {
   15228 	device_t dev = sc->sc_dev;
   15229 	struct ethercom *ec = &sc->sc_ethercom;
   15230 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15231 	int rv = 0;
   15232 
   15233 	switch (sc->sc_phytype) {
   15234 	case WMPHY_82579:
   15235 		lpa = I82579_EEE_LP_ABILITY;
   15236 		pcs_status = I82579_EEE_PCS_STATUS;
   15237 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15238 		break;
   15239 	case WMPHY_I217:
   15240 		lpa = I217_EEE_LP_ABILITY;
   15241 		pcs_status = I217_EEE_PCS_STATUS;
   15242 		adv_addr = I217_EEE_ADVERTISEMENT;
   15243 		break;
   15244 	default:
   15245 		return 0;
   15246 	}
   15247 
   15248 	if (sc->phy.acquire(sc)) {
   15249 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15250 		return 0;
   15251 	}
   15252 
   15253 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15254 	if (rv != 0)
   15255 		goto release;
   15256 
   15257 	/* Clear bits that enable EEE in various speeds */
   15258 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15259 
   15260 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15261 		/* Save off link partner's EEE ability */
   15262 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15263 		if (rv != 0)
   15264 			goto release;
   15265 
   15266 		/* Read EEE advertisement */
   15267 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15268 			goto release;
   15269 
   15270 		/*
   15271 		 * Enable EEE only for speeds in which the link partner is
   15272 		 * EEE capable and for which we advertise EEE.
   15273 		 */
   15274 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15275 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15276 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15277 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15278 			if ((data & ANLPAR_TX_FD) != 0)
   15279 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15280 			else {
   15281 				/*
   15282 				 * EEE is not supported in 100Half, so ignore
   15283 				 * partner's EEE in 100 ability if full-duplex
   15284 				 * is not advertised.
   15285 				 */
   15286 				sc->eee_lp_ability
   15287 				    &= ~AN_EEEADVERT_100_TX;
   15288 			}
   15289 		}
   15290 	}
   15291 
   15292 	if (sc->sc_phytype == WMPHY_82579) {
   15293 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15294 		if (rv != 0)
   15295 			goto release;
   15296 
   15297 		data &= ~I82579_LPI_PLL_SHUT_100;
   15298 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15299 	}
   15300 
   15301 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15302 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15303 		goto release;
   15304 
   15305 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15306 release:
   15307 	sc->phy.release(sc);
   15308 
   15309 	return rv;
   15310 }
   15311 
   15312 static int
   15313 wm_set_eee(struct wm_softc *sc)
   15314 {
   15315 	struct ethercom *ec = &sc->sc_ethercom;
   15316 
   15317 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15318 		return 0;
   15319 
   15320 	if (sc->sc_type == WM_T_I354) {
   15321 		/* I354 uses an external PHY */
   15322 		return 0; /* not yet */
   15323 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15324 		return wm_set_eee_i350(sc);
   15325 	else if (sc->sc_type >= WM_T_PCH2)
   15326 		return wm_set_eee_pchlan(sc);
   15327 
   15328 	return 0;
   15329 }
   15330 
   15331 /*
   15332  * Workarounds (mainly PHY related).
    15333  * Basically, PHY workarounds are implemented in the PHY drivers.
   15334  */
   15335 
   15336 /* Work-around for 82566 Kumeran PCS lock loss */
   15337 static int
   15338 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15339 {
   15340 	struct mii_data *mii = &sc->sc_mii;
   15341 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15342 	int i, reg, rv;
   15343 	uint16_t phyreg;
   15344 
   15345 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15346 		device_xname(sc->sc_dev), __func__));
   15347 
   15348 	/* If the link is not up, do nothing */
   15349 	if ((status & STATUS_LU) == 0)
   15350 		return 0;
   15351 
   15352 	/* Nothing to do if the link is other than 1Gbps */
   15353 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15354 		return 0;
   15355 
   15356 	for (i = 0; i < 10; i++) {
   15357 		/* read twice */
   15358 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15359 		if (rv != 0)
   15360 			return rv;
   15361 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15362 		if (rv != 0)
   15363 			return rv;
   15364 
   15365 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15366 			goto out;	/* GOOD! */
   15367 
   15368 		/* Reset the PHY */
   15369 		wm_reset_phy(sc);
   15370 		delay(5*1000);
   15371 	}
   15372 
   15373 	/* Disable GigE link negotiation */
   15374 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15375 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15376 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15377 
   15378 	/*
   15379 	 * Call gig speed drop workaround on Gig disable before accessing
   15380 	 * any PHY registers.
   15381 	 */
   15382 	wm_gig_downshift_workaround_ich8lan(sc);
   15383 
   15384 out:
   15385 	return 0;
   15386 }
   15387 
   15388 /*
   15389  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15390  *  @sc: pointer to the HW structure
   15391  *
    15392  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15393  *  LPLU, Gig disable, MDIC PHY reset):
   15394  *    1) Set Kumeran Near-end loopback
   15395  *    2) Clear Kumeran Near-end loopback
   15396  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15397  */
   15398 static void
   15399 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15400 {
   15401 	uint16_t kmreg;
   15402 
   15403 	/* Only for igp3 */
   15404 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15405 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15406 			return;
   15407 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15408 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15409 			return;
   15410 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15411 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15412 	}
   15413 }
   15414 
   15415 /*
   15416  * Workaround for pch's PHYs
   15417  * XXX should be moved to new PHY driver?
   15418  */
   15419 static int
   15420 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15421 {
   15422 	device_t dev = sc->sc_dev;
   15423 	struct mii_data *mii = &sc->sc_mii;
   15424 	struct mii_softc *child;
   15425 	uint16_t phy_data, phyrev = 0;
   15426 	int phytype = sc->sc_phytype;
   15427 	int rv;
   15428 
   15429 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15430 		device_xname(dev), __func__));
   15431 	KASSERT(sc->sc_type == WM_T_PCH);
   15432 
   15433 	/* Set MDIO slow mode before any other MDIO access */
   15434 	if (phytype == WMPHY_82577)
   15435 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15436 			return rv;
   15437 
   15438 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15439 	if (child != NULL)
   15440 		phyrev = child->mii_mpd_rev;
   15441 
    15442 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15443 	if ((child != NULL) &&
   15444 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15445 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15446 		/* Disable generation of early preamble (0x4431) */
   15447 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15448 		    &phy_data);
   15449 		if (rv != 0)
   15450 			return rv;
   15451 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15452 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15453 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15454 		    phy_data);
   15455 		if (rv != 0)
   15456 			return rv;
   15457 
   15458 		/* Preamble tuning for SSC */
   15459 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15460 		if (rv != 0)
   15461 			return rv;
   15462 	}
   15463 
   15464 	/* 82578 */
   15465 	if (phytype == WMPHY_82578) {
   15466 		/*
   15467 		 * Return registers to default by doing a soft reset then
   15468 		 * writing 0x3140 to the control register
   15469 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15470 		 */
   15471 		if ((child != NULL) && (phyrev < 2)) {
   15472 			PHY_RESET(child);
   15473 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15474 			    0x3140);
   15475 			if (rv != 0)
   15476 				return rv;
   15477 		}
   15478 	}
   15479 
   15480 	/* Select page 0 */
   15481 	if ((rv = sc->phy.acquire(sc)) != 0)
   15482 		return rv;
   15483 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15484 	sc->phy.release(sc);
   15485 	if (rv != 0)
   15486 		return rv;
   15487 
   15488 	/*
   15489 	 * Configure the K1 Si workaround during phy reset assuming there is
    15490 	 * link, so that it disables K1 if the link is at 1Gbps.
   15491 	 */
   15492 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15493 		return rv;
   15494 
   15495 	/* Workaround for link disconnects on a busy hub in half duplex */
   15496 	rv = sc->phy.acquire(sc);
   15497 	if (rv)
   15498 		return rv;
   15499 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15500 	if (rv)
   15501 		goto release;
   15502 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15503 	    phy_data & 0x00ff);
   15504 	if (rv)
   15505 		goto release;
   15506 
   15507 	/* Set MSE higher to enable link to stay up when noise is high */
   15508 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15509 release:
   15510 	sc->phy.release(sc);
   15511 
   15512 	return rv;
   15513 }
   15514 
   15515 /*
   15516  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15517  *  @sc:   pointer to the HW structure
   15518  */
   15519 static void
   15520 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15521 {
   15522 	device_t dev = sc->sc_dev;
   15523 	uint32_t mac_reg;
   15524 	uint16_t i, wuce;
   15525 	int count;
   15526 
   15527 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15528 		device_xname(sc->sc_dev), __func__));
   15529 
   15530 	if (sc->phy.acquire(sc) != 0)
   15531 		return;
   15532 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15533 		goto release;
   15534 
   15535 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15536 	count = wm_rar_count(sc);
   15537 	for (i = 0; i < count; i++) {
   15538 		uint16_t lo, hi;
   15539 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15540 		lo = (uint16_t)(mac_reg & 0xffff);
   15541 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15542 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15543 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15544 
   15545 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15546 		lo = (uint16_t)(mac_reg & 0xffff);
   15547 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15548 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15549 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15550 	}
   15551 
   15552 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15553 
   15554 release:
   15555 	sc->phy.release(sc);
   15556 }
   15557 
   15558 /*
   15559  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15560  *  done after every PHY reset.
   15561  */
   15562 static int
   15563 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15564 {
   15565 	device_t dev = sc->sc_dev;
   15566 	int rv;
   15567 
   15568 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15569 		device_xname(dev), __func__));
   15570 	KASSERT(sc->sc_type == WM_T_PCH2);
   15571 
   15572 	/* Set MDIO slow mode before any other MDIO access */
   15573 	rv = wm_set_mdio_slow_mode_hv(sc);
   15574 	if (rv != 0)
   15575 		return rv;
   15576 
   15577 	rv = sc->phy.acquire(sc);
   15578 	if (rv != 0)
   15579 		return rv;
   15580 	/* Set MSE higher to enable link to stay up when noise is high */
   15581 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15582 	if (rv != 0)
   15583 		goto release;
    15584 	/* Drop the link after the MSE threshold has been reached 5 times */
   15585 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15586 release:
   15587 	sc->phy.release(sc);
   15588 
   15589 	return rv;
   15590 }
   15591 
    15592 /**
    15593  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
           *  @sc:   pointer to the HW structure
    15594  *  @link: link up bool flag
    15595  *
    15596  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    15597  *  preventing further DMA write requests.  Work around the issue by disabling
    15598  *  the de-assertion of the clock request when in 1Gbps mode.
    15599  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
    15600  *  speeds in order to avoid Tx hangs.
    15601  **/
   15602 static int
   15603 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15604 {
   15605 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15606 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15607 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15608 	uint16_t phyreg;
   15609 
   15610 	if (link && (speed == STATUS_SPEED_1000)) {
    15611 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15612 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15613 		    &phyreg);
   15614 		if (rv != 0)
   15615 			goto release;
   15616 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15617 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15618 		if (rv != 0)
   15619 			goto release;
   15620 		delay(20);
   15621 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15622 
   15623 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15624 		    &phyreg);
   15625 release:
   15626 		sc->phy.release(sc);
   15627 		return rv;
   15628 	}
   15629 
   15630 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15631 
   15632 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15633 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15634 	    || !link
   15635 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15636 		goto update_fextnvm6;
   15637 
   15638 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15639 
   15640 	/* Clear link status transmit timeout */
   15641 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15642 	if (speed == STATUS_SPEED_100) {
   15643 		/* Set inband Tx timeout to 5x10us for 100Half */
   15644 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15645 
   15646 		/* Do not extend the K1 entry latency for 100Half */
   15647 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15648 	} else {
   15649 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15650 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15651 
   15652 		/* Extend the K1 entry latency for 10 Mbps */
   15653 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15654 	}
   15655 
   15656 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15657 
   15658 update_fextnvm6:
   15659 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15660 	return 0;
   15661 }
   15662 
   15663 /*
   15664  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15665  *  @sc:   pointer to the HW structure
   15666  *  @link: link up bool flag
   15667  *
   15668  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15669  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15670  *  If link is down, the function will restore the default K1 setting located
   15671  *  in the NVM.
   15672  */
   15673 static int
   15674 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15675 {
   15676 	int k1_enable = sc->sc_nvm_k1_enabled;
   15677 
   15678 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15679 		device_xname(sc->sc_dev), __func__));
   15680 
   15681 	if (sc->phy.acquire(sc) != 0)
   15682 		return -1;
   15683 
   15684 	if (link) {
   15685 		k1_enable = 0;
   15686 
   15687 		/* Link stall fix for link up */
   15688 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15689 		    0x0100);
   15690 	} else {
   15691 		/* Link stall fix for link down */
   15692 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15693 		    0x4100);
   15694 	}
   15695 
   15696 	wm_configure_k1_ich8lan(sc, k1_enable);
   15697 	sc->phy.release(sc);
   15698 
   15699 	return 0;
   15700 }
   15701 
   15702 /*
   15703  *  wm_k1_workaround_lv - K1 Si workaround
   15704  *  @sc:   pointer to the HW structure
   15705  *
    15706  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    15707  *  Disable K1 for 1000 and 100 speeds.
   15708  */
   15709 static int
   15710 wm_k1_workaround_lv(struct wm_softc *sc)
   15711 {
   15712 	uint32_t reg;
   15713 	uint16_t phyreg;
   15714 	int rv;
   15715 
   15716 	if (sc->sc_type != WM_T_PCH2)
   15717 		return 0;
   15718 
   15719 	/* Set K1 beacon duration based on 10Mbps speed */
   15720 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15721 	if (rv != 0)
   15722 		return rv;
   15723 
   15724 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15725 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15726 		if (phyreg &
   15727 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15728 			/* LV 1G/100 packet drop issue workaround */
   15729 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15730 			    &phyreg);
   15731 			if (rv != 0)
   15732 				return rv;
   15733 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15734 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15735 			    phyreg);
   15736 			if (rv != 0)
   15737 				return rv;
   15738 		} else {
   15739 			/* For 10Mbps */
   15740 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15741 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15742 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15743 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15744 		}
   15745 	}
   15746 
   15747 	return 0;
   15748 }
   15749 
   15750 /*
   15751  *  wm_link_stall_workaround_hv - Si workaround
   15752  *  @sc: pointer to the HW structure
   15753  *
   15754  *  This function works around a Si bug where the link partner can get
   15755  *  a link up indication before the PHY does. If small packets are sent
   15756  *  by the link partner they can be placed in the packet buffer without
   15757  *  being properly accounted for by the PHY and will stall preventing
   15758  *  further packets from being received.  The workaround is to clear the
   15759  *  packet buffer after the PHY detects link up.
   15760  */
   15761 static int
   15762 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15763 {
   15764 	uint16_t phyreg;
   15765 
   15766 	if (sc->sc_phytype != WMPHY_82578)
   15767 		return 0;
   15768 
    15769 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15770 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15771 	if ((phyreg & BMCR_LOOP) != 0)
   15772 		return 0;
   15773 
   15774 	/* Check if link is up and at 1Gbps */
   15775 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15776 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15777 	    | BM_CS_STATUS_SPEED_MASK;
   15778 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15779 		| BM_CS_STATUS_SPEED_1000))
   15780 		return 0;
   15781 
   15782 	delay(200 * 1000);	/* XXX too big */
   15783 
   15784 	/* Flush the packets in the fifo buffer */
   15785 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15786 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15787 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15788 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15789 
   15790 	return 0;
   15791 }
   15792 
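          /*
           * Slow down MDIO accesses to the PHY by setting HV_KMRN_MDIO_SLOW
           * in the PHY's KMRN mode control register.
           */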
   15793 static int
   15794 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15795 {
   15796 	int rv;
   15797 	uint16_t reg;
   15798 
   15799 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15800 	if (rv != 0)
   15801 		return rv;
   15802 
   15803 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15804 	    reg | HV_KMRN_MDIO_SLOW);
   15805 }
   15806 
   15807 /*
   15808  *  wm_configure_k1_ich8lan - Configure K1 power state
   15809  *  @sc: pointer to the HW structure
   15810  *  @enable: K1 state to configure
   15811  *
   15812  *  Configure the K1 power state based on the provided parameter.
   15813  *  Assumes semaphore already acquired.
   15814  */
   15815 static void
   15816 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15817 {
   15818 	uint32_t ctrl, ctrl_ext, tmp;
   15819 	uint16_t kmreg;
   15820 	int rv;
   15821 
   15822 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15823 
   15824 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15825 	if (rv != 0)
   15826 		return;
   15827 
   15828 	if (k1_enable)
   15829 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15830 	else
   15831 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15832 
   15833 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15834 	if (rv != 0)
   15835 		return;
   15836 
   15837 	delay(20);
   15838 
   15839 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15840 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15841 
   15842 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15843 	tmp |= CTRL_FRCSPD;
   15844 
   15845 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15846 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15847 	CSR_WRITE_FLUSH(sc);
   15848 	delay(20);
   15849 
   15850 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15851 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15852 	CSR_WRITE_FLUSH(sc);
   15853 	delay(20);
   15854 
   15855 	return;
   15856 }
   15857 
   15858 /* special case - for 82575 - need to do manual init ... */
   15859 static void
   15860 wm_reset_init_script_82575(struct wm_softc *sc)
   15861 {
    15862 	/*
    15863 	 * Remark: this is untested code - we have no board without EEPROM.
    15864 	 * The setup is the same as in the FreeBSD driver for the i82575.
    15865 	 */
   15866 
   15867 	/* SerDes configuration via SERDESCTRL */
   15868 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15869 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15872 
   15873 	/* CCM configuration via CCMCTL register */
   15874 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15875 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15876 
   15877 	/* PCIe lanes configuration */
   15878 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15879 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15880 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15881 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15882 
   15883 	/* PCIe PLL Configuration */
   15884 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15885 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15886 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15887 }
   15888 
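          /*
           * On SGMII-equipped 82580 parts, restore the external/shared MDIO
           * selection bits in MDICNFG from the per-port NVM word after reset.
           */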
   15889 static void
   15890 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15891 {
   15892 	uint32_t reg;
   15893 	uint16_t nvmword;
   15894 	int rv;
   15895 
   15896 	if (sc->sc_type != WM_T_82580)
   15897 		return;
   15898 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15899 		return;
   15900 
   15901 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15902 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15903 	if (rv != 0) {
   15904 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15905 		    __func__);
   15906 		return;
   15907 	}
   15908 
   15909 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15910 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15911 		reg |= MDICNFG_DEST;
   15912 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15913 		reg |= MDICNFG_COM_MDIO;
   15914 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15915 }
   15916 
   15917 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15918 
   15919 static bool
   15920 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15921 {
   15922 	uint32_t reg;
   15923 	uint16_t id1, id2;
   15924 	int i, rv;
   15925 
   15926 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15927 		device_xname(sc->sc_dev), __func__));
   15928 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15929 
   15930 	id1 = id2 = 0xffff;
   15931 	for (i = 0; i < 2; i++) {
   15932 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15933 		    &id1);
   15934 		if ((rv != 0) || MII_INVALIDID(id1))
   15935 			continue;
   15936 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15937 		    &id2);
   15938 		if ((rv != 0) || MII_INVALIDID(id2))
   15939 			continue;
   15940 		break;
   15941 	}
   15942 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15943 		goto out;
   15944 
   15945 	/*
   15946 	 * In case the PHY needs to be in mdio slow mode,
   15947 	 * set slow mode and try to get the PHY id again.
   15948 	 */
   15949 	rv = 0;
   15950 	if (sc->sc_type < WM_T_PCH_LPT) {
   15951 		sc->phy.release(sc);
   15952 		wm_set_mdio_slow_mode_hv(sc);
   15953 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15954 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15955 		sc->phy.acquire(sc);
   15956 	}
   15957 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15958 		printf("XXX return with false\n");
   15959 		return false;
   15960 	}
   15961 out:
   15962 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15963 		/* Only unforce SMBus if ME is not active */
   15964 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15965 			uint16_t phyreg;
   15966 
   15967 			/* Unforce SMBus mode in PHY */
   15968 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15969 			    CV_SMB_CTRL, &phyreg);
   15970 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15971 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15972 			    CV_SMB_CTRL, phyreg);
   15973 
   15974 			/* Unforce SMBus mode in MAC */
   15975 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15976 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15977 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15978 		}
   15979 	}
   15980 	return true;
   15981 }
   15982 
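          /*
           * Power cycle the PHY by toggling the LANPHYPC pin value, after
           * setting the PHY configuration counter to 50msec; then wait for
           * the PHY to come back up.
           */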
   15983 static void
   15984 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15985 {
   15986 	uint32_t reg;
   15987 	int i;
   15988 
   15989 	/* Set PHY Config Counter to 50msec */
   15990 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15991 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15992 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15993 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15994 
   15995 	/* Toggle LANPHYPC */
   15996 	reg = CSR_READ(sc, WMREG_CTRL);
   15997 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15998 	reg &= ~CTRL_LANPHYPC_VALUE;
   15999 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16000 	CSR_WRITE_FLUSH(sc);
   16001 	delay(1000);
   16002 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16003 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16004 	CSR_WRITE_FLUSH(sc);
   16005 
   16006 	if (sc->sc_type < WM_T_PCH_LPT)
   16007 		delay(50 * 1000);
   16008 	else {
   16009 		i = 20;
   16010 
   16011 		do {
   16012 			delay(5 * 1000);
   16013 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16014 		    && i--);
   16015 
   16016 		delay(30 * 1000);
   16017 	}
   16018 }
   16019 
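          /*
           * Program PCIe LTR (Latency Tolerance Reporting) from the link
           * speed and the Rx packet buffer size, and set up the OBFF
           * (Optimized Buffer Flush/Fill) high water mark accordingly.
           */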
   16020 static int
   16021 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16022 {
   16023 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16024 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16025 	uint32_t rxa;
   16026 	uint16_t scale = 0, lat_enc = 0;
   16027 	int32_t obff_hwm = 0;
   16028 	int64_t lat_ns, value;
   16029 
   16030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16031 		device_xname(sc->sc_dev), __func__));
   16032 
   16033 	if (link) {
   16034 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16035 		uint32_t status;
   16036 		uint16_t speed;
   16037 		pcireg_t preg;
   16038 
   16039 		status = CSR_READ(sc, WMREG_STATUS);
   16040 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16041 		case STATUS_SPEED_10:
   16042 			speed = 10;
   16043 			break;
   16044 		case STATUS_SPEED_100:
   16045 			speed = 100;
   16046 			break;
   16047 		case STATUS_SPEED_1000:
   16048 			speed = 1000;
   16049 			break;
   16050 		default:
   16051 			device_printf(sc->sc_dev, "Unknown speed "
   16052 			    "(status = %08x)\n", status);
   16053 			return -1;
   16054 		}
   16055 
   16056 		/* Rx Packet Buffer Allocation size (KB) */
   16057 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16058 
   16059 		/*
   16060 		 * Determine the maximum latency tolerated by the device.
   16061 		 *
   16062 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16063 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16064 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16065 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16066 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16067 		 */
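          		/*
          		 * For example, scale = 2 and value = 1000 encode a
          		 * tolerance of 1000 * 2^10 ns, i.e. about 1.02 ms.
          		 */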
   16068 		lat_ns = ((int64_t)rxa * 1024 -
   16069 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16070 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16071 		if (lat_ns < 0)
   16072 			lat_ns = 0;
   16073 		else
   16074 			lat_ns /= speed;
   16075 		value = lat_ns;
   16076 
   16077 		while (value > LTRV_VALUE) {
    16078 			scale++;
   16079 			value = howmany(value, __BIT(5));
   16080 		}
   16081 		if (scale > LTRV_SCALE_MAX) {
   16082 			printf("%s: Invalid LTR latency scale %d\n",
   16083 			    device_xname(sc->sc_dev), scale);
   16084 			return -1;
   16085 		}
   16086 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16087 
   16088 		/* Determine the maximum latency tolerated by the platform */
   16089 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16090 		    WM_PCI_LTR_CAP_LPT);
   16091 		max_snoop = preg & 0xffff;
   16092 		max_nosnoop = preg >> 16;
   16093 
   16094 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16095 
   16096 		if (lat_enc > max_ltr_enc) {
   16097 			lat_enc = max_ltr_enc;
   16098 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16099 			    * PCI_LTR_SCALETONS(
   16100 				    __SHIFTOUT(lat_enc,
   16101 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16102 		}
   16103 
   16104 		if (lat_ns) {
   16105 			lat_ns *= speed * 1000;
   16106 			lat_ns /= 8;
   16107 			lat_ns /= 1000000000;
   16108 			obff_hwm = (int32_t)(rxa - lat_ns);
   16109 		}
   16110 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16111 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16112 			    "(rxa = %d, lat_ns = %d)\n",
   16113 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16114 			return -1;
   16115 		}
   16116 	}
   16117 	/* Snoop and No-Snoop latencies the same */
   16118 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16119 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16120 
   16121 	/* Set OBFF high water mark */
   16122 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16123 	reg |= obff_hwm;
   16124 	CSR_WRITE(sc, WMREG_SVT, reg);
   16125 
   16126 	/* Enable OBFF */
   16127 	reg = CSR_READ(sc, WMREG_SVCR);
   16128 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16129 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16130 
   16131 	return 0;
   16132 }
   16133 
   16134 /*
   16135  * I210 Errata 25 and I211 Errata 10
   16136  * Slow System Clock.
   16137  */
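          /*
           * While the PHY PLL reports itself unconfigured, reset the internal
           * PHY and bounce the device through D3hot/D0 with a patched NVM
           * autoload word, then restore the saved WUC and MDICNFG registers.
           */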
   16138 static int
   16139 wm_pll_workaround_i210(struct wm_softc *sc)
   16140 {
   16141 	uint32_t mdicnfg, wuc;
   16142 	uint32_t reg;
   16143 	pcireg_t pcireg;
   16144 	uint32_t pmreg;
   16145 	uint16_t nvmword, tmp_nvmword;
   16146 	uint16_t phyval;
   16147 	bool wa_done = false;
   16148 	int i, rv = 0;
   16149 
   16150 	/* Get Power Management cap offset */
   16151 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16152 	    &pmreg, NULL) == 0)
   16153 		return -1;
   16154 
   16155 	/* Save WUC and MDICNFG registers */
   16156 	wuc = CSR_READ(sc, WMREG_WUC);
   16157 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16158 
   16159 	reg = mdicnfg & ~MDICNFG_DEST;
   16160 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16161 
   16162 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16163 		nvmword = INVM_DEFAULT_AL;
   16164 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16165 
   16166 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16167 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16168 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16169 
   16170 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16171 			rv = 0;
   16172 			break; /* OK */
   16173 		} else
   16174 			rv = -1;
   16175 
   16176 		wa_done = true;
   16177 		/* Directly reset the internal PHY */
   16178 		reg = CSR_READ(sc, WMREG_CTRL);
   16179 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16180 
   16181 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16182 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16183 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16184 
   16185 		CSR_WRITE(sc, WMREG_WUC, 0);
   16186 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16187 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16188 
   16189 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16190 		    pmreg + PCI_PMCSR);
   16191 		pcireg |= PCI_PMCSR_STATE_D3;
   16192 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16193 		    pmreg + PCI_PMCSR, pcireg);
   16194 		delay(1000);
   16195 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16196 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16197 		    pmreg + PCI_PMCSR, pcireg);
   16198 
   16199 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16200 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16201 
   16202 		/* Restore WUC register */
   16203 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16204 	}
   16205 
   16206 	/* Restore MDICNFG setting */
   16207 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16208 	if (wa_done)
   16209 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16210 	return rv;
   16211 }
   16212 
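          /*
           * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: ungate
           * the side clock and disable IOSF sideband clock gating and clock
           * requests.
           */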
   16213 static void
   16214 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16215 {
   16216 	uint32_t reg;
   16217 
   16218 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16219 		device_xname(sc->sc_dev), __func__));
   16220 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16221 	    || (sc->sc_type == WM_T_PCH_CNP));
   16222 
   16223 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16224 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16225 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16226 
   16227 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16228 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16229 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16230 }
   16231